/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR;

#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)			\
	MACRO_BEGIN			\
	if (lsf_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#define LSF_ALLOC_DEBUG(args)		\
	MACRO_BEGIN			\
	if (lsf_alloc_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#else /* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif /* DEBUG */
/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock);

static kern_return_t
shared_file_init(
	ipc_port_t		*text_region_handle,
	vm_size_t		text_region_size,
	ipc_port_t		*data_region_handle,
	vm_size_t		data_region_size,
	vm_offset_t		*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	load_struct_t			*target_entry,	/* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	load_struct_t			*target_entry,	/* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload);

#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
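
/*
 * Illustrative note (added for clarity, not part of the original source):
 * load_file_hash() folds the low 24 bits of the file object's pointer value
 * and reduces it modulo the table size.  For example, with a 256-bucket
 * table a file object at 0x12345678 lands in bucket (0x345678 % 256).
 * Every lookup and insertion below uses this macro, so the same file object
 * always maps to the same bucket.
 */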
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)

ipc_port_t	sfma_handle = NULL;
zone_t		lsf_zone;

int		shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle32 = NULL;
ipc_port_t		com_region_handle64 = NULL;
vm_map_t		com_region_map32 = NULL;
vm_map_t		com_region_map64 = NULL;
vm_size_t		com_region_size32 = _COMM_PAGE32_AREA_LENGTH;
vm_size_t		com_region_size64 = _COMM_PAGE64_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;

#if DEBUG
int shared_region_debug = 0;
#endif /* DEBUG */
__private_extern__ kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	shared_region_mapping_t old_region;

	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p[%x,%x,%x])\n",
			     task, shared_region,
			     shared_region ? shared_region->fs_base : 0,
			     shared_region ? shared_region->system : 0,
			     shared_region ? shared_region->flags : 0));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}

	old_region = task->system_shared_region;
	SHARED_REGION_TRACE(
		SHARED_REGION_TRACE_INFO,
		("shared_region: %p set_region(task=%p)"
		 "old=%p[%x,%x,%x], new=%p[%x,%x,%x]\n",
		 current_thread(), task,
		 old_region,
		 old_region ? old_region->fs_base : 0,
		 old_region ? old_region->system : 0,
		 old_region ? old_region->flags : 0,
		 shared_region,
		 shared_region ? shared_region->fs_base : 0,
		 shared_region ? shared_region->system : 0,
		 shared_region ? shared_region->flags : 0));

	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}
/*
 * shared_region_object_chain_detach:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
__private_extern__ void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}
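
/*
 * Hypothetical usage sketch (added for illustration, not from the original
 * source): a caller that clones a private shared region and intends to strip
 * mappings from it might do something like:
 *
 *	shared_region_mapping_t clone;
 *	shared_file_create_system_region(&clone, fs_base, system);
 *	shared_region_object_chain_detached(clone);
 *	... remove unwanted mappings from "clone" ...
 *
 * Once SHARED_REGION_STANDALONE is set, the region is treated as private:
 * its per-file mapping bookkeeping and any shadow chain are not relied upon.
 */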
/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just lookup the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
__private_extern__ kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t		target_region,
	shared_region_mapping_t		object_chain_region)
{
	shared_region_object_chain_t object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if (target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
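
/*
 * Illustrative example (not from the original source): chaining a new,
 * empty region on top of an existing one so that lookups fall through:
 *
 *	shared_region_mapping_t new_region, old_region;
 *	...
 *	shared_region_object_chain_attach(new_region, old_region);
 *
 * A subsequent lsf_hash_lookup() that misses in "new_region" follows
 * new_region->object_chain to "old_region" (see the object_chain walk in
 * lsf_hash_lookup() below).
 */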
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next,
	unsigned int		fs_base,
	unsigned int		system)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if (*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failure\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = fs_base;
	(*shared_region)->system = system;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	unsigned int		*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if (shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next = NULL;
	unsigned int				ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if (shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if (((vm_named_entry_t)
			     (shared_region->text_region->ip_kobject))
			    ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if (shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if ((ref_count == 1) &&
			    (shared_region->flags & SHARED_REGION_SYSTEM)
			    && !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}
/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region != NULL) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if (user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();

	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region,
	int			fs_base,
	int			system)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size, &mapping_array);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle, text_size,
					    data_handle, data_size,
					    mapping_array,
					    GLOBAL_SHARED_TEXT_SEGMENT,
					    shared_region,
					    SHARED_ALTERNATE_LOAD_BASE,
					    SHARED_ALTERNATE_LOAD_BASE,
					    fs_base, system);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if (com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/*
 * load a new default for a specified environment into the default share
 * regions list.  If a previous default exists for the environment specification
 * it is returned along with its reference.  It is expected that the new
 * system region structure passes a reference.
 */
shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if ((old_system_region != NULL) &&
	    (old_system_region->fs_base == fs_base) &&
	    (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while (old_system_region->default_env_list != NULL) {
			if ((old_system_region->default_env_list->fs_base == fs_base) &&
			    (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry                            */
	if (old_system_region) {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "new list\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}
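
/*
 * Note (added for clarity): update_default_shared_region() returns the
 * previous default region for the (fs_base, system) environment, still
 * holding the reference that the list used to own, or NULL if there was
 * none.  Callers such as shared_file_boot_time_init() below are expected
 * to drop that reference, e.g.:
 *
 *	old = update_default_shared_region(new_region);
 *	if (old)
 *		shared_region_mapping_dealloc(old);
 */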
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure.
 */
shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;

	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while (system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if ((system_region->fs_base == fs_base) &&
		    (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if (system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_system_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if (old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock, 0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while (old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if (old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock, 0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}
/*
 * Symbol compatability; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}
void
remove_all_shared_regions(void)
{
	shared_region_mapping_t system_region;
	shared_region_mapping_t next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if (system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while (system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi independent of the split libs       */
/* and so its policies have to be handled differently by the code that   */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both                       */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if (com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if (com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code */
	if ((kret = shared_region_object_create(
		     com_region_size32,
		     &com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
	}
	if ((kret = shared_region_object_create(
		     com_region_size64,
		     &com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
	}

	/* now set export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
					    com_region_size32,
					    NULL, 0, 0,
					    _COMM_PAGE_BASE_ADDRESS,
					    &com_mapping_resource,
					    0, 0,
					    ENV_DEFAULT_ROOT, cpu_type());
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}
void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	mach_port_t		text_region_handle;
	mach_port_t		data_region_handle;
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&text_region_handle,
			 text_region_size,
			 &data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(text_region_handle,
				     text_region_size,
				     data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE,
				     fs_base, system);

	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions.                                       */
	if (old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if (com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.                          */

static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_size_t		data_table_size;
	vm_size_t		hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
		text_region_size,
		text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
		data_region_size,
		data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;

	if (shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if (vm_map_find_space(kernel_map, &map_addr,
				      data_table_size, 0, 0, &entry)
		    != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ, &sfma_handle,
				       NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);

	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return KERN_SUCCESS;
}
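
/*
 * Sizing note (added for clarity, not in the original source): with the
 * 256MB (0x10000000) data region used at boot, the bookkeeping table wired
 * above is data_region_size >> 9 = 512KB, of which data_region_size >> 14 =
 * 16KB holds the hash bucket array; the remainder backs the load_file_ele
 * zone created with zinit() above.
 */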
static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header)
{
	vm_size_t	hash_table_size;
	vm_size_t	hash_table_offset;
	int		i;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone.                                                       */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;


	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: shared_file_header_init: "
				 "No memory for data table\n"));
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}
extern void shared_region_dump_file_entry(
	int		trace_level,
	load_struct_t	*entry);	/* forward */

void shared_region_dump_file_entry(
	int		trace_level,
	load_struct_t	*entry)
{
	int			i;
	loaded_mapping_t	*mapping;

	if (trace_level > shared_region_trace_level) {
		return;
	}
	printf("shared region: %p: "
	       "file_entry %p base_address=0x%x file_offset=0x%x "
	       "%d mappings\n",
	       current_thread(), entry,
	       entry->base_address, entry->file_offset, entry->mapping_cnt);
	mapping = entry->mappings;
	for (i = 0; i < entry->mapping_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "offset=0x%x size=0x%x file_offset=0x%x prot=%d\n",
		       current_thread(),
		       i,
		       mapping->mapping_offset,
		       mapping->size,
		       mapping->file_offset,
		       mapping->protection);
		mapping = mapping->next;
	}
}
extern void shared_region_dump_mappings(
	int				trace_level,
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	mach_vm_offset_t		base_offset);	/* forward */

void shared_region_dump_mappings(
	int				trace_level,
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	mach_vm_offset_t		base_offset)
{
	int	i;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	printf("shared region: %p: %d mappings base_offset=0x%llx\n",
	       current_thread(), map_cnt, (uint64_t) base_offset);
	for (i = 0; i < map_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "addr=0x%llx, size=0x%llx, file_offset=0x%llx, "
		       "max_prot=0x%x init_prot=0x%x\n",
		       current_thread(),
		       i,
		       (uint64_t) mappings[i].sfm_address,
		       (uint64_t) mappings[i].sfm_size,
		       (uint64_t) mappings[i].sfm_file_offset,
		       mappings[i].sfm_max_prot,
		       mappings[i].sfm_init_prot);
	}
}
extern void shared_region_dump_conflict_info(
	int		trace_level,
	vm_map_t	map,
	vm_map_offset_t	offset,
	vm_map_size_t	size);	/* forward */

void
shared_region_dump_conflict_info(
	int		trace_level,
	vm_map_t	map,
	vm_map_offset_t	offset,
	vm_map_size_t	size)
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	memory_object_t	mem_object;
	kern_return_t	kr;
	char		*filename;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	object = VM_OBJECT_NULL;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, offset, &entry)) {
		entry = entry->vme_next;
	}

	if (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			printf("shared region: %p: conflict with submap "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		object = entry->object.vm_object;
		if (object == VM_OBJECT_NULL) {
			printf("shared region: %p: conflict with NULL object "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			object = VM_OBJECT_NULL;
			goto done;
		}

		vm_object_lock(object);
		while (object->shadow != VM_OBJECT_NULL) {
			vm_object_t shadow;

			shadow = object->shadow;
			vm_object_lock(shadow);
			vm_object_unlock(object);
			object = shadow;
		}

		if (object->internal) {
			printf("shared region: %p: conflict with anonymous "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}
		if (! object->pager_ready) {
			printf("shared region: %p: conflict with uninitialized "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		mem_object = object->pager;

		/*
		 * XXX FBDP: "!internal" doesn't mean it's a vnode pager...
		 */
		kr = vnode_pager_get_object_filename(mem_object,
						     &filename);
		if (kr != KERN_SUCCESS) {
			filename = NULL;
		}
		printf("shared region: %p: conflict with '%s' "
		       "at 0x%llx size 0x%llx\n",
		       current_thread(),
		       filename ? filename : "<unknown>",
		       (uint64_t) offset,
		       (uint64_t) size);
	}
done:
	if (object != VM_OBJECT_NULL) {
		vm_object_unlock(object);
	}
	vm_map_unlock_read(map);
}
/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the mappings
 * are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object                                */

	if (shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: map_shared_file: "
				 "shared_file_header_init() failed kr=0x%x\n",
				 current_thread(), ret));
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object                                             */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence.                                    */

		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: map_shared_file: "
					 "already mapped with "
					 "more than %d mappings\n",
					 current_thread(), map_cnt));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].sfm_address)
			     & SHARED_DATA_REGION_MASK) !=
			    file_mapping->mapping_offset ||
			    mappings[i].sfm_size != file_mapping->size ||
			    mappings[i].sfm_file_offset != file_mapping->file_offset ||
			    mappings[i].sfm_init_prot != file_mapping->protection) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "mapping #%d differs\n",
					 current_thread(), i));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if (i != map_cnt) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: map_shared_file: "
				 "already mapped with "
				 "%d mappings instead of %d\n",
				 current_thread(), i, map_cnt));
			shared_region_dump_file_entry(
				SHARED_REGION_TRACE_INFO,
				file_entry);
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);

			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * We can't have that.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "map_shared_file: already mapped, "
					 "would need to slide 0x%llx\n",
					 current_thread(), slide));
			} else {
				/*
				 * The file is already mapped at the correct
				 * address and the caller doesn't want any
				 * sliding: nothing more to do.
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_map(mappings, map_cnt,
			      (void *)file_control,
			      file_size,
			      sm_info,
			      base_offset,
			      slide_p);
		if (ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t	new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set            */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
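
/*
 * Illustrative call sequence (not from the original source): a Mach-O loader
 * mapping a split library would typically call, with "mappings" describing
 * each segment:
 *
 *	kr = map_shared_file(map_cnt, mappings, file_control, file_size,
 *			     sm_info, base_offset, &slide);
 *	if (kr == KERN_SUCCESS && slide != 0)
 *		... relocate the library by "slide" bytes ...
 *
 * Passing a NULL slide_p means the caller refuses any sliding and gets
 * KERN_FAILURE if the file is already mapped at a different address.
 */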
/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	return KERN_SUCCESS;
}
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.                             */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int)file_object) &&
		    (entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while (target_region) {
				if ((!(sm_info->self)) ||
				    ((target_region == entry->regions_instance) &&
				     (target_region->depth >= entry->depth))) {
					if (alternate &&
					    entry->base_address >= sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "alt=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   alternate, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
					if (regular &&
					    entry->base_address < sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "reg=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   regular, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
				}
				if (target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p NOT FOUND\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));
	return (load_struct_t *)0;
}
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int				need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if (shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for (i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if (region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance, region));
			}

			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));

	return NULL;	/* XXX */
}
/*
 * Symbol compatability; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
__private_extern__ load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table.                                    */

static load_struct_t *
lsf_hash_delete(
	load_struct_t			*target_entry,	/* optional: NULL if not relevant */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(target=%p,file=%p,base=0x%x,sm_info=%p)\n",
		   target_entry, file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if ((!(sm_info->self)) || ((shared_region_mapping_t)
					   sm_info->self == entry->regions_instance)) {
			if ((target_entry == NULL ||
			     entry == target_entry) &&
			    (entry->file_object == (int) file_object) &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete; not found\n"));
	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table.                                   */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t	*shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}
1931 * Look in the shared region, starting from the end, for a place to fit all the
1932 * mappings while respecting their relative offsets.
1934 static kern_return_t
1936 unsigned int map_cnt
,
1937 struct shared_file_mapping_np
*mappings_in
,
1938 shared_region_task_mappings_t sm_info
,
1939 mach_vm_offset_t
*base_offset_p
)
1941 mach_vm_offset_t max_mapping_offset
;
1943 vm_map_entry_t map_entry
, prev_entry
, next_entry
;
1944 mach_vm_offset_t prev_hole_start
, prev_hole_end
;
1945 mach_vm_offset_t mapping_offset
, mapping_end_offset
;
1946 mach_vm_offset_t base_offset
;
1947 mach_vm_size_t mapping_size
;
1948 mach_vm_offset_t wiggle_room
, wiggle
;
1949 vm_map_t text_map
, data_map
, map
;
1950 vm_named_entry_t region_entry
;
1951 ipc_port_t region_handle
;
1954 struct shared_file_mapping_np
*mappings
, tmp_mapping
;
1955 unsigned int sort_index
, sorted_index
;
1956 vm_map_offset_t sort_min_address
;
1957 unsigned int sort_min_index
;
1960 * Sort the mappings array, so that we can try and fit them in
1961 * in the right order as we progress along the VM maps.
1963 * We can't modify the original array (the original order is
1964 * important when doing lookups of the mappings), so copy it first.
1967 kr
= kmem_alloc(kernel_map
,
1968 (vm_offset_t
*) &mappings
,
1969 (vm_size_t
) (map_cnt
* sizeof (mappings
[0])));
1970 if (kr
!= KERN_SUCCESS
) {
1971 return KERN_NO_SPACE
;
1974 bcopy(mappings_in
, mappings
, map_cnt
* sizeof (mappings
[0]));
1976 max_mapping_offset
= 0;
1977 for (sorted_index
= 0;
1978 sorted_index
< map_cnt
;
1981 /* first remaining entry is our new starting point */
1982 sort_min_index
= sorted_index
;
1983 mapping_end_offset
= ((mappings
[sort_min_index
].sfm_address
&
1984 SHARED_TEXT_REGION_MASK
) +
1985 mappings
[sort_min_index
].sfm_size
);
1986 sort_min_address
= mapping_end_offset
;
1987 /* compute the highest mapping_offset as well... */
1988 if (mapping_end_offset
> max_mapping_offset
) {
1989 max_mapping_offset
= mapping_end_offset
;
1991 /* find the lowest mapping_offset in the remaining entries */
1992 for (sort_index
= sorted_index
+ 1;
1993 sort_index
< map_cnt
;
1996 mapping_end_offset
=
1997 ((mappings
[sort_index
].sfm_address
&
1998 SHARED_TEXT_REGION_MASK
) +
1999 mappings
[sort_index
].sfm_size
);
2001 if (mapping_end_offset
< sort_min_address
) {
2002 /* lowest mapping_offset so far... */
2003 sort_min_index
= sort_index
;
2004 sort_min_address
= mapping_end_offset
;
2007 if (sort_min_index
!= sorted_index
) {
2009 tmp_mapping
= mappings
[sort_min_index
];
2010 mappings
[sort_min_index
] = mappings
[sorted_index
];
2011 mappings
[sorted_index
] = tmp_mapping
;
2016 max_mapping_offset
= vm_map_round_page(max_mapping_offset
);
2018 /* start from the end of the shared area */
2019 base_offset
= sm_info
->text_size
;
2021 /* can all the mappings fit ? */
2022 if (max_mapping_offset
> base_offset
) {
2023 kmem_free(kernel_map
,
2024 (vm_offset_t
) mappings
,
2025 map_cnt
* sizeof (mappings
[0]));
2026 return KERN_FAILURE
;
2030 * Align the last mapping to the end of the submaps
2031 * and start from there.
2033 base_offset
-= max_mapping_offset
;
2035 region_handle
= (ipc_port_t
) sm_info
->text_region
;
2036 region_entry
= (vm_named_entry_t
) region_handle
->ip_kobject
;
2037 text_map
= region_entry
->backing
.map
;
2039 region_handle
= (ipc_port_t
) sm_info
->data_region
;
2040 region_entry
= (vm_named_entry_t
) region_handle
->ip_kobject
;
2041 data_map
= region_entry
->backing
.map
;
2043 vm_map_lock_read(text_map
);
2044 vm_map_lock_read(data_map
);
2048 * At first, we can wiggle all the way from our starting point
2049 * (base_offset) towards the start of the map (0), if needed.
2051 wiggle_room
= base_offset
;
	for (i = (signed) map_cnt - 1; i >= 0; i--) {
		if (mappings[i].sfm_size == 0) {
			/* nothing to map here... */
			continue;
		}

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			/* copy-on-write mappings are in the data submap */
			map = data_map;
		} else {
			/* other mappings are in the text submap */
			map = text_map;
		}

		/* get the offset within the appropriate submap */
		mapping_offset = (mappings[i].sfm_address &
				  SHARED_TEXT_REGION_MASK);
		mapping_size = mappings[i].sfm_size;
		mapping_end_offset = mapping_offset + mapping_size;
		mapping_offset = vm_map_trunc_page(mapping_offset);
		mapping_end_offset = vm_map_round_page(mapping_end_offset);
		mapping_size = mapping_end_offset - mapping_offset;

		for (;;) {
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * already mapped: good, it might fit.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}

			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto restart;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need. Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there: wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto restart;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;
		} /* while we look for a hole */
	} /* for each mapping */

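	/*
	 * Every mapping found a hole at the current base_offset, so report
	 * that (possibly slid) base address back to the caller.
	 */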
	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}

/*
 * lsf_map:
 *
 * Attempt to establish the mappings for a split library into the shared region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;
	mach_vm_size_t		total_size;

	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object, sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object, sm_info, entry));
	if (entry == NULL) {
		SHARED_REGION_TRACE(
			SHARED_REGION_TRACE_ERROR,
			("shared_region: %p: "
			 "lsf_map: unable to allocate entry\n",
			 current_thread()));
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;
	total_size = 0;

	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_ERROR,
					("shared_region: %p: lsf_map: "
					 "RW mapping #%d not in segment",
					 current_thread(), i));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_ERROR,
					mappings, map_cnt, base_offset);

				lsf_deallocate(entry,
					       file_object,
					       entry->base_address,
					       sm_info,
					       TRUE);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_ERROR,
					("shared_region: %p: lsf_map: "
					 "RO mapping #%d not in segment",
					 current_thread(), i));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_ERROR,
					mappings, map_cnt, base_offset);

				lsf_deallocate(entry,
					       file_object,
					       entry->base_address,
					       sm_info,
					       TRUE);
				return KERN_INVALID_ARGUMENT;
			}
		}

		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     (file_size))) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: lsf_map: "
				 "ZF mapping #%d beyond EOF",
				 current_thread(), i));
			shared_region_dump_mappings(SHARED_REGION_TRACE_ERROR,
						    mappings, map_cnt,
						    base_offset);

			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			return KERN_INVALID_ARGUMENT;
		}

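		/*
		 * The submap-relative offset of this mapping is its address
		 * masked with the submap's region mask; adding the file's
		 * base address yields the address to map at in that submap.
		 */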
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);

		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}

		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		total_size += mappings[i].sfm_size;
		if (mappings[i].sfm_size == 0) {
			/* nothing to map... */
			kr = KERN_SUCCESS;
		} else {
			kr = mach_vm_map(
				region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT);
		}

		if (kr != KERN_SUCCESS) {
			vm_offset_t old_base_address;

			old_base_address = entry->base_address;
			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			entry = NULL;

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: lsf_map: "
					 "mapping #%d failed to map, "
					 "kr=0x%x, sliding...\n",
					 current_thread(), i, kr));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);
				shared_region_dump_conflict_info(
					SHARED_REGION_TRACE_CONFLICT,
					region_entry->backing.map,
					(old_base_address +
					 ((mappings[i].sfm_address)
					  & region_mask)),
					vm_map_round_page(mappings[i].sfm_size));

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: lsf_map: "
				 "mapping #%d failed to map, "
				 "kr=0x%x, no sliding\n",
				 current_thread(), i, kr));
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);
			shared_region_dump_conflict_info(
				SHARED_REGION_TRACE_CONFLICT,
				region_entry->backing.map,
				(old_base_address +
				 ((mappings[i].sfm_address)
				  & region_mask)),
				vm_map_round_page(mappings[i].sfm_size));
			return KERN_FAILURE;
		}

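		/*
		 * The mapping is established (or was empty): describe it in
		 * a loaded_mapping_t so that lsf_deallocate() can later find
		 * and tear down everything set up for this file.  tptr
		 * always points at the link field to fill in next, keeping
		 * the list in mapping order.
		 */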
		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: "
				 "lsf_map: unable to allocate mapping\n",
				 current_thread()));
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
			& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	} /* for each mapping */

	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if ((sm_info->flags & SHARED_REGION_STANDALONE) ||
	    (total_size == 0)) {
		/*
		 * Two cases:
		 * 1. we have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping. Just deallocate it all.
		 * 2. the total size of the mappings is 0, so nothing at all
		 * was mapped. Let's not waste kernel resources to describe
		 * nothing.
		 *
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(entry, file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}

/* finds the file_object extent list in the shared memory hash table */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */

static void
lsf_unload(
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(NULL, file_object, base_offset, sm_info, TRUE);
}

/*
 * lsf_deallocate:
 *
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocates the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	load_struct_t		*target_entry,
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t		unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;
	kern_return_t		kr;

	LSF_DEBUG(("lsf_deallocate(target=%p,file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   target_entry, file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(target_entry,
				file_object,
				base_offset,
				sm_info);
	if (entry) {
		map_ele = entry->mappings;
		while (map_ele != NULL) {
			if (unload) {
				ipc_port_t region_handle;
				vm_named_entry_t region_entry;

				if (map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				kr = vm_deallocate(region_entry->backing.map,
						   (entry->base_address +
						    map_ele->mapping_offset),
						   map_ele->size);
				assert(kr == KERN_SUCCESS);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_deallocate: done\n"));
}

/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100) / lsf_zone->max_size;
}