/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * File:	vm/vm_shared_memory_server.c
 * Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR;

#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)			\
	if (lsf_debug) {		\
		kprintf args;		\
	}
#define LSF_ALLOC_DEBUG(args)		\
	if (lsf_alloc_debug) {		\
		kprintf args;		\
	}
#else /* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif /* DEBUG */
/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock);

static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	load_struct_t			*target_entry, /* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	load_struct_t			*target_entry, /* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload);


#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
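/*
 * Illustrative note (not part of the original source): load_file_hash()
 * folds the low 24 bits of the file object pointer into the bucket count.
 * A hypothetical function form of the same computation:
 *
 *	static int
 *	example_load_file_hash(void *file_object, int table_size)
 *	{
 *		return (((natural_t) file_object) & 0xffffff) % table_size;
 *	}
 *
 * e.g. a file_object of 0x12345678 with a 256-bucket table hashes to
 * (0x345678 % 256) == 0x78.
 */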
vm_offset_t shared_file_mapping_array = 0;

shared_region_mapping_t default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)

ipc_port_t sfma_handle = NULL;
zone_t lsf_zone;

int shared_file_available_hash_ele;

/* com region support */
ipc_port_t com_region_handle32 = NULL;
ipc_port_t com_region_handle64 = NULL;
vm_map_t com_region_map32 = NULL;
vm_map_t com_region_map64 = NULL;
vm_size_t com_region_size32 = _COMM_PAGE32_AREA_LENGTH;
vm_size_t com_region_size64 = _COMM_PAGE64_AREA_LENGTH;
shared_region_mapping_t com_mapping_resource = NULL;

int shared_region_debug = 0;
kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}
kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	shared_region_mapping_t old_region;

	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p[%x,%x,%x])\n",
			     task, shared_region,
			     shared_region ? shared_region->fs_base : 0,
			     shared_region ? shared_region->system : 0,
			     shared_region ? shared_region->flags : 0));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}

	old_region = task->system_shared_region;
	SHARED_REGION_TRACE(
		SHARED_REGION_TRACE_INFO,
		("shared_region: %p set_region(task=%p)"
		 "old=%p[%x,%x,%x], new=%p[%x,%x,%x]\n",
		 current_thread(), task,
		 old_region,
		 old_region ? old_region->fs_base : 0,
		 old_region ? old_region->system : 0,
		 old_region ? old_region->flags : 0,
		 shared_region,
		 shared_region ? shared_region->fs_base : 0,
		 shared_region ? shared_region->system : 0,
		 shared_region ? shared_region->flags : 0));

	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}
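/*
 * Usage sketch (illustrative, not from the original source): a caller that
 * hands a region to another task is expected to pass a reference along with
 * it, roughly:
 *
 *	shared_region_mapping_t sr;
 *
 *	vm_get_shared_region(parent_task, &sr);	 // look up, no ref taken
 *	shared_region_mapping_ref(sr);		 // take a ref for the new task
 *	vm_set_shared_region(child_task, sr);	 // new task now owns that ref
 *
 * "parent_task" and "child_task" are hypothetical task_t values; the real
 * callers live in the fork/exec paths outside this file.
 */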
/*
 * shared_region_object_chain_detached:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}
/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just lookup the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t	target_region,
	shared_region_mapping_t	object_chain_region)
{
	shared_region_object_chain_t object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if(target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
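/*
 * Illustrative sketch (not from the original source) of the shadow chain
 * built by shared_region_object_chain_attach().  A cloned region keeps no
 * mapping info of its own; lookups fall through to the older region:
 *
 *	clone->object_chain->object_chain_region == original
 *	clone->object_chain->next  == original->object_chain
 *	clone->object_chain->depth == original->depth (before the attach)
 *
 * lsf_hash_lookup() walks these object_chain_region links until it finds a
 * matching load_struct_t or runs out of shadows.
 */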
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next,
	unsigned int		fs_base,
	unsigned int		system)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if(*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failure\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = fs_base;
	(*shared_region)->system = system;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
void
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);
}
kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if(shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings sm_info;
	shared_region_mapping_t next = NULL;
	unsigned int ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if(shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if(((vm_named_entry_t)
			    (shared_region->text_region->ip_kobject))
			   ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if(shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if((ref_count == 1) &&
			   (shared_region->flags & SHARED_REGION_SYSTEM)
			   && !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region,need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}
/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region != NULL) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}
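/*
 * Illustrative pairing (not from the original source): every
 * shared_region_mapping_ref() is balanced by a shared_region_mapping_dealloc(),
 * e.g.
 *
 *	shared_region_mapping_ref(sr);
 *	... use sr ...
 *	shared_region_mapping_dealloc(sr);	// may free sr when the count hits 0
 *
 * The _lock variant above is only for callers that already know whether the
 * shared_file_header or default-regions-list locks are held.
 */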
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if(user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();

	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region,
	int			fs_base,
	int			system)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size,
				&mapping_array);
	if(kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle, text_size,
					    data_handle, data_size,
					    mapping_array,
					    GLOBAL_SHARED_TEXT_SEGMENT,
					    shared_region,
					    SHARED_ALTERNATE_LOAD_BASE,
					    SHARED_ALTERNATE_LOAD_BASE,
					    fs_base, system);
	if(kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if(com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/*
 * load a new default for a specified environment into the default share
 * regions list.  If a previous default exists for the environment specification
 * it is returned along with its reference.  It is expected that the new
 * system region structure passes a reference.
 */
shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if((old_system_region != NULL) &&
	   (old_system_region->fs_base == fs_base) &&
	   (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while(old_system_region->default_env_list != NULL) {
			if((old_system_region->default_env_list->fs_base == fs_base) &&
			   (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry */
	if(old_system_region) {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}
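/*
 * Illustrative call sequence (not from the original source), modeled on
 * shared_file_boot_time_init() below: the caller takes an extra reference
 * before installing the new default, and any displaced default comes back
 * with its reference still held so the caller can drop it:
 *
 *	shared_region_mapping_ref(new_region);
 *	old = update_default_shared_region(new_region);
 *	if (old != NULL)
 *		shared_region_mapping_dealloc(old);
 */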
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure
 */
shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while(system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if((system_region->fs_base == fs_base) &&
		   (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if(system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_system_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if(old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock,
						   0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while(old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if(old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock,
							   0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}
void
remove_all_shared_regions(void)
{
	shared_region_mapping_t system_region;
	shared_region_mapping_t next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if(system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while(system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi independent of the split libs       */
/* and so its policies have to be handled differently by the code that   */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both                       */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if(com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if(com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code  */
	if((kret = shared_region_object_create(
			com_region_size32,
			&com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
	}
	if((kret = shared_region_object_create(
			com_region_size64,
			&com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
	}

	/* now set export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
					    com_region_size32,
					    NULL, 0, 0,
					    _COMM_PAGE_BASE_ADDRESS,
					    &com_mapping_resource,
					    0, 0,
					    ENV_DEFAULT_ROOT, cpu_type());
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}
void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	mach_port_t		text_region_handle;
	mach_port_t		data_region_handle;
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&text_region_handle,
			 text_region_size,
			 &data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(text_region_handle,
				     text_region_size,
				     data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE,
				     fs_base, system);

	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions. */
	if(old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if(com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.  */


static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_size_t		data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
		text_region_size,
		text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
		data_region_size,
		data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;

	if(shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &map_addr,
				     data_table_size, 0, 0, &entry)
		   != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE,  b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   TRUE);
		}


		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;


		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ, &sfma_handle,
				       NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);

	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return KERN_SUCCESS;
}
static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header)
{
	vm_size_t	hash_table_size;
	vm_size_t	hash_table_offset;
	int		i;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int allocable_hash_pages;
	static vm_offset_t hash_cram_address;


	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: shared_file_header_init: "
				 "No memory for data table\n"));
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}
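/*
 * Sizing sketch (illustrative, not from the original source): with the
 * boot-time 256MB data region, shared_file_init() above picks
 * hash_size = 0x10000000 >> 14 = 16384 bytes of bucket space, i.e.
 * 16384 / sizeof(queue_head_t) buckets, and this routine then crams at
 * most 3 pages at a time into lsf_zone as load_file_ele entries are needed.
 */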
extern void shared_region_dump_file_entry(
	int		trace_level,
	load_struct_t	*entry); /* forward */

void shared_region_dump_file_entry(
	int		trace_level,
	load_struct_t	*entry)
{
	int			i;
	loaded_mapping_t	*mapping;

	if (trace_level > shared_region_trace_level) {
		return;
	}
	printf("shared region: %p: "
	       "file_entry %p base_address=0x%x file_offset=0x%x "
	       "%d mappings\n",
	       current_thread(), entry,
	       entry->base_address, entry->file_offset, entry->mapping_cnt);
	mapping = entry->mappings;
	for (i = 0; i < entry->mapping_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "offset=0x%x size=0x%x file_offset=0x%x prot=%d\n",
		       current_thread(), i,
		       mapping->mapping_offset,
		       mapping->size,
		       mapping->file_offset,
		       mapping->protection);
		mapping = mapping->next;
	}
}
extern void shared_region_dump_mappings(
	int				trace_level,
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	mach_vm_offset_t		base_offset); /* forward */

void shared_region_dump_mappings(
	int				trace_level,
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	mach_vm_offset_t		base_offset)
{
	int i;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	printf("shared region: %p: %d mappings  base_offset=0x%llx\n",
	       current_thread(), map_cnt, (uint64_t) base_offset);
	for (i = 0; i < map_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "addr=0x%llx, size=0x%llx, file_offset=0x%llx, "
		       "prot=(%d,%d)\n",
		       current_thread(), i,
		       (uint64_t) mappings[i].sfm_address,
		       (uint64_t) mappings[i].sfm_size,
		       (uint64_t) mappings[i].sfm_file_offset,
		       mappings[i].sfm_max_prot,
		       mappings[i].sfm_init_prot);
	}
}
extern void shared_region_dump_conflict_info(
	int		trace_level,
	vm_map_t	map,
	vm_map_offset_t	offset,
	vm_map_size_t	size); /* forward */

void
shared_region_dump_conflict_info(
	int		trace_level,
	vm_map_t	map,
	vm_map_offset_t	offset,
	vm_map_size_t	size)
{
	vm_map_entry_t		entry;
	vm_object_t		object;
	memory_object_t		mem_object;
	kern_return_t		kr;
	char			*filename;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	object = VM_OBJECT_NULL;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, offset, &entry)) {
		entry = entry->vme_next;
	}

	if (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			printf("shared region: %p: conflict with submap "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		object = entry->object.vm_object;
		if (object == VM_OBJECT_NULL) {
			printf("shared region: %p: conflict with NULL object "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			object = VM_OBJECT_NULL;
			goto done;
		}

		vm_object_lock(object);
		while (object->shadow != VM_OBJECT_NULL) {
			vm_object_t shadow;

			shadow = object->shadow;
			vm_object_lock(shadow);
			vm_object_unlock(object);
			object = shadow;
		}

		if (object->internal) {
			printf("shared region: %p: conflict with anonymous "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}
		if (! object->pager_ready) {
			printf("shared region: %p: conflict with uninitialized "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		mem_object = object->pager;

		/*
		 * XXX FBDP: "!internal" doesn't mean it's a vnode pager...
		 */
		kr = vnode_pager_get_object_filename(mem_object,
						     &filename);
		if (kr != KERN_SUCCESS) {
			filename = NULL;
		}
		printf("shared region: %p: conflict with '%s' "
		       "at 0x%llx size 0x%llx\n",
		       current_thread(),
		       filename ? filename : "<unknown>",
		       (uint64_t) offset,
		       (uint64_t) size);
	}
done:
	if (object != VM_OBJECT_NULL) {
		vm_object_unlock(object);
	}
	vm_map_unlock_read(map);
}
/*
 * Attempt to map a split library into the shared region.  Check if the mappings
 * are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: map_shared_file: "
				 "shared_file_header_init() failed kr=0x%x\n",
				 current_thread(), ret));
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence. */

		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i>=map_cnt) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: map_shared_file: "
					 "already mapped with "
					 "more than %d mappings\n",
					 current_thread(), map_cnt));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].sfm_address)
			    & SHARED_DATA_REGION_MASK) !=
			   file_mapping->mapping_offset ||
			   mappings[i].sfm_size != file_mapping->size ||
			   mappings[i].sfm_file_offset != file_mapping->file_offset ||
			   mappings[i].sfm_init_prot != file_mapping->protection) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "mapping #%d differs\n",
					 current_thread(), i));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i!=map_cnt) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: map_shared_file: "
				 "already mapped with "
				 "%d mappings instead of %d\n",
				 current_thread(), i, map_cnt));
			shared_region_dump_file_entry(
				SHARED_REGION_TRACE_INFO,
				file_entry);
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);

			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * We can't establish this mapping.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "map_shared_file: already mapped, "
					 "would need to slide 0x%llx\n",
					 current_thread(), slide));
			} else {
				/*
				 * The file is already mapped at the correct
				 * address and no sliding is needed.
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_map(mappings, map_cnt,
			      (void *)file_control,
			      file_size,
			      sm_info,
			      base_offset,
			      slide_p);
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	return KERN_SUCCESS;
}
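/*
 * Worked example (illustrative, not from the original source): with a single
 * "keep" range covering [client_base + 0x1000, client_base + 0x3000), the
 * loop above deallocates [0, 0x1000) of the text submap, skips the kept
 * pages, and the trailing code then deallocates everything from offset
 * 0x3000 to the end of the text submap plus the entire data submap.
 */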
/* A hash lookup function for the list of loaded files in      */
/* shared_memory_server space.  */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int)file_object) &&
		    (entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while(target_region) {
				if((!(sm_info->self)) ||
				   ((target_region == entry->regions_instance) &&
				    (target_region->depth >= entry->depth))) {
					if(alternate &&
					   entry->base_address >= sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "alt=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   alternate, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
					if (regular &&
					    entry->base_address < sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "reg=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   regular, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
				}
				if(target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p NOT FOUND\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));
	return (load_struct_t *)0;
}
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int				need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for(i = 0;  i<shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance, region));
			}
			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));

	return NULL;	/* XXX */
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list, (list of loaded extents) for a file from     */
/* the loaded file hash table.  */

static load_struct_t *
lsf_hash_delete(
	load_struct_t		*target_entry, /* optional: NULL if not relevant */
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(target=%p,file=%p,base=0x%x,sm_info=%p)\n",
		   target_entry, file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
					  sm_info->self == entry->regions_instance)) {
			if ((target_entry == NULL ||
			     entry == target_entry) &&
			    (entry->file_object == (int) file_object) &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete; not found\n"));
	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the    */
/* server loaded file hash table.  */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}
1933 * Look in the shared region, starting from the end, for a place to fit all the
1934 * mappings while respecting their relative offsets.
1936 static kern_return_t
1938 unsigned int map_cnt
,
1939 struct shared_file_mapping_np
*mappings_in
,
1940 shared_region_task_mappings_t sm_info
,
1941 mach_vm_offset_t
*base_offset_p
)
1943 mach_vm_offset_t max_mapping_offset
;
1945 vm_map_entry_t map_entry
, prev_entry
, next_entry
;
1946 mach_vm_offset_t prev_hole_start
, prev_hole_end
;
1947 mach_vm_offset_t mapping_offset
, mapping_end_offset
;
1948 mach_vm_offset_t base_offset
;
1949 mach_vm_size_t mapping_size
;
1950 mach_vm_offset_t wiggle_room
, wiggle
;
1951 vm_map_t text_map
, data_map
, map
;
1952 vm_named_entry_t region_entry
;
1953 ipc_port_t region_handle
;
1956 struct shared_file_mapping_np
*mappings
, tmp_mapping
;
1957 unsigned int sort_index
, sorted_index
;
1958 vm_map_offset_t sort_min_address
;
1959 unsigned int sort_min_index
;
1962 * Sort the mappings array, so that we can try and fit them in
1963 * in the right order as we progress along the VM maps.
1965 * We can't modify the original array (the original order is
1966 * important when doing lookups of the mappings), so copy it first.
1969 kr
= kmem_alloc(kernel_map
,
1970 (vm_offset_t
*) &mappings
,
1971 (vm_size_t
) (map_cnt
* sizeof (mappings
[0])));
1972 if (kr
!= KERN_SUCCESS
) {
1973 return KERN_NO_SPACE
;
1976 bcopy(mappings_in
, mappings
, map_cnt
* sizeof (mappings
[0]));
1978 max_mapping_offset
= 0;
1979 for (sorted_index
= 0;
1980 sorted_index
< map_cnt
;
1983 /* first remaining entry is our new starting point */
1984 sort_min_index
= sorted_index
;
1985 mapping_end_offset
= ((mappings
[sort_min_index
].sfm_address
&
1986 SHARED_TEXT_REGION_MASK
) +
1987 mappings
[sort_min_index
].sfm_size
);
1988 sort_min_address
= mapping_end_offset
;
1989 /* compute the highest mapping_offset as well... */
1990 if (mapping_end_offset
> max_mapping_offset
) {
1991 max_mapping_offset
= mapping_end_offset
;
1993 /* find the lowest mapping_offset in the remaining entries */
1994 for (sort_index
= sorted_index
+ 1;
1995 sort_index
< map_cnt
;
1998 mapping_end_offset
=
1999 ((mappings
[sort_index
].sfm_address
&
2000 SHARED_TEXT_REGION_MASK
) +
2001 mappings
[sort_index
].sfm_size
);
2003 if (mapping_end_offset
< sort_min_address
) {
2004 /* lowest mapping_offset so far... */
2005 sort_min_index
= sort_index
;
2006 sort_min_address
= mapping_end_offset
;
2009 if (sort_min_index
!= sorted_index
) {
2011 tmp_mapping
= mappings
[sort_min_index
];
2012 mappings
[sort_min_index
] = mappings
[sorted_index
];
2013 mappings
[sorted_index
] = tmp_mapping
;
2018 max_mapping_offset
= vm_map_round_page(max_mapping_offset
);
2020 /* start from the end of the shared area */
2021 base_offset
= sm_info
->text_size
;
2023 /* can all the mappings fit ? */
2024 if (max_mapping_offset
> base_offset
) {
2025 kmem_free(kernel_map
,
2026 (vm_offset_t
) mappings
,
2027 map_cnt
* sizeof (mappings
[0]));
2028 return KERN_FAILURE
;
2032 * Align the last mapping to the end of the submaps
2033 * and start from there.
2035 base_offset
-= max_mapping_offset
;
2037 region_handle
= (ipc_port_t
) sm_info
->text_region
;
2038 region_entry
= (vm_named_entry_t
) region_handle
->ip_kobject
;
2039 text_map
= region_entry
->backing
.map
;
2041 region_handle
= (ipc_port_t
) sm_info
->data_region
;
2042 region_entry
= (vm_named_entry_t
) region_handle
->ip_kobject
;
2043 data_map
= region_entry
->backing
.map
;
2045 vm_map_lock_read(text_map
);
2046 vm_map_lock_read(data_map
);
2050 * At first, we can wiggle all the way from our starting point
2051 * (base_offset) towards the start of the map (0), if needed.
2053 wiggle_room
= base_offset
;
2055 for (i
= (signed) map_cnt
- 1; i
>= 0; i
--) {
2056 if (mappings
[i
].sfm_size
== 0) {
2057 /* nothing to map here... */
2060 if (mappings
[i
].sfm_init_prot
& VM_PROT_COW
) {
2061 /* copy-on-write mappings are in the data submap */
2064 /* other mappings are in the text submap */
2067 /* get the offset within the appropriate submap */
2068 mapping_offset
= (mappings
[i
].sfm_address
&
2069 SHARED_TEXT_REGION_MASK
);
2070 mapping_size
= mappings
[i
].sfm_size
;
2071 mapping_end_offset
= mapping_offset
+ mapping_size
;
2072 mapping_offset
= vm_map_trunc_page(mapping_offset
);
2073 mapping_end_offset
= vm_map_round_page(mapping_end_offset
);
2074 mapping_size
= mapping_end_offset
- mapping_offset
;
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * mapped.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}
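
			/*
			 * At this point [prev_hole_start, prev_hole_end)
			 * bounds the hole that precedes (or contains) the
			 * candidate address base_offset + mapping_offset.
			 */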
			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need.  Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there: wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;

		} /* while we look for a hole */
	} /* for each mapping */
	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}
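
/*
 * Illustrative note (not from the original source): lsf_slide() is the
 * "slide the whole library" fallback used by lsf_map() below.  For example,
 * if sm_info->text_size were 0x10000000 and the library's highest mapping
 * ended at offset 0x5000, the search would start at
 * base_offset = 0x10000000 - 0x5000 = 0x0FFFB000 and wiggle downwards from
 * there until every mapping lands in a free hole (KERN_SUCCESS) or the
 * bottom of the submap is reached (KERN_FAILURE).
 */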
/*
 * lsf_map:
 *
 * Attempt to establish the mappings for a split library into the shared region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;
	mach_vm_size_t		total_size;
	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object,
		   sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object,
		   sm_info, entry));
	if (entry == NULL) {
		SHARED_REGION_TRACE(
			SHARED_REGION_TRACE_ERROR,
			("shared_region: %p: "
			 "lsf_map: unable to allocate entry\n",
			 current_thread()));
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;
	total_size = 0;
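
	/*
	 * total_size accumulates the sizes of all the mappings established
	 * below; if it is still 0 at the end, nothing was actually mapped
	 * and the bookkeeping for this file is thrown away before returning.
	 */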
	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_ERROR,
					("shared_region: %p: lsf_map: "
					 "RW mapping #%d not in segment",
					 current_thread(), i));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_ERROR,
					mappings, map_cnt, base_offset);

				lsf_deallocate(entry,
					       file_object,
					       entry->base_address,
					       sm_info,
					       TRUE);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_ERROR,
					("shared_region: %p: lsf_map: "
					 "RO mapping #%d not in segment",
					 current_thread(), i));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_ERROR,
					mappings, map_cnt, base_offset);

				lsf_deallocate(entry,
					       file_object,
					       entry->base_address,
					       sm_info,
					       TRUE);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     (file_size))) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: lsf_map: "
				 "ZF mapping #%d beyond EOF",
				 current_thread(), i));
			shared_region_dump_mappings(SHARED_REGION_TRACE_ERROR,
						    mappings, map_cnt,
						    base_offset);

			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			return KERN_INVALID_ARGUMENT;
		}
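
		/*
		 * Mapping #i passed the sanity checks: compute where it goes
		 * in the chosen submap and which pager (if any) backs it.
		 */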
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);
		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}
		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		total_size += mappings[i].sfm_size;
		if (mappings[i].sfm_size == 0) {
			/* nothing to map... */
			kr = KERN_SUCCESS;
		} else {
			kr = mach_vm_map(
				region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT);
		}
		if (kr != KERN_SUCCESS) {
			vm_offset_t old_base_address;

			old_base_address = entry->base_address;
			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			entry = NULL;

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: lsf_map: "
					 "mapping #%d failed to map, "
					 "kr=0x%x, sliding...\n",
					 current_thread(), i, kr));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);
				shared_region_dump_conflict_info(
					SHARED_REGION_TRACE_CONFLICT,
					region_entry->backing.map,
					(old_base_address +
					 ((mappings[i].sfm_address)
					  & region_mask)),
					vm_map_round_page(mappings[i].sfm_size));

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: lsf_map: "
				 "mapping #%d failed to map, "
				 "kr=0x%x, no sliding\n",
				 current_thread(), i, kr));
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);
			shared_region_dump_conflict_info(
				SHARED_REGION_TRACE_CONFLICT,
				region_entry->backing.map,
				(old_base_address +
				 ((mappings[i].sfm_address)
				  & region_mask)),
				vm_map_round_page(mappings[i].sfm_size));
			return KERN_FAILURE;
		}
		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: "
				 "lsf_map: unable to allocate mapping\n",
				 current_thread()));
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
			& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	}
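
	/*
	 * All requested mappings are now established and recorded; report
	 * how far the library was slid from the requested base address, if
	 * the caller asked for that information.
	 */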
	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if ((sm_info->flags & SHARED_REGION_STANDALONE) ||
	    (total_size == 0)) {
		/*
		 * Two cases:
		 * 1. we have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping. Just deallocate it all.
		 * 2. the total size of the mappings is 0, so nothing at all
		 * was mapped. Let's not waste kernel resources to describe
		 * nothing.
		 *
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(entry, file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed                                             */

static void
lsf_unload(
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(NULL, file_object, base_offset, sm_info, TRUE);
}
/*
 * lsf_deallocate:
 *
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocate the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	load_struct_t		*target_entry,
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t		unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;
	kern_return_t		kr;

	LSF_DEBUG(("lsf_deallocate(target=%p,file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   target_entry, file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(target_entry,
				file_object,
				base_offset,
				sm_info);
	if (entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if (unload) {
				ipc_port_t		region_handle;
				vm_named_entry_t	region_entry;

				if(map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				kr = vm_deallocate(region_entry->backing.map,
						   (entry->base_address +
						    map_ele->mapping_offset),
						   map_ele->size);
				assert(kr == KERN_SUCCESS);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_deallocate: done\n"));
}
/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100) / lsf_zone->max_size;
}
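
/*
 * Example of the gauge arithmetic (illustrative numbers): with 500 elements
 * of 64 bytes each in use and a zone max_size of 65536 bytes,
 * (500 * 64 * 100) / 65536 == 48, so the pool is reported as 48% full.
 */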