/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR;

#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)				\
	MACRO_BEGIN				\
	if (lsf_debug) {			\
		kprintf args;			\
	}					\
	MACRO_END
#define LSF_ALLOC_DEBUG(args)			\
	MACRO_BEGIN				\
	if (lsf_alloc_debug) {			\
		kprintf args;			\
	}					\
	MACRO_END
#else /* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif /* DEBUG */
/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t			size,
	ipc_port_t			*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t		shared_region,
	int				need_sfh_lock,
	int				need_drl_lock);

static kern_return_t
shared_file_init(
	ipc_port_t			*text_region_handle,
	vm_size_t			text_region_size,
	ipc_port_t			*data_region_handle,
	vm_size_t			data_region_size,
	vm_offset_t			*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t		*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	load_struct_t			*target_entry,	/* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	load_struct_t			*target_entry,	/* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload);

#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
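/*
 * Illustrative example (hypothetical values): hashing a file_object whose
 * address is 0x12abcdef into a table of 128 buckets gives
 *	(0x12abcdef & 0xffffff) % 128 == 0xabcdef % 128 == 111
 * so the bucket depends only on the low 24 bits of the object's address,
 * and two file objects can collide whenever those bits agree modulo the
 * table size.
 */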
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle32 = NULL;
ipc_port_t		com_region_handle64 = NULL;
vm_map_t		com_region_map32 = NULL;
vm_map_t		com_region_map64 = NULL;
vm_size_t		com_region_size32 = _COMM_PAGE32_AREA_LENGTH;
vm_size_t		com_region_size64 = _COMM_PAGE64_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;

#if DEBUG
int			shared_region_debug = 0;
#endif /* DEBUG */
__private_extern__ kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	shared_region_mapping_t	old_region;

	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p[%x,%x,%x])\n",
			     task, shared_region,
			     shared_region ? shared_region->fs_base : 0,
			     shared_region ? shared_region->system : 0,
			     shared_region ? shared_region->flags : 0));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}

	old_region = task->system_shared_region;
	SHARED_REGION_TRACE(
		SHARED_REGION_TRACE_INFO,
		("shared_region: %p set_region(task=%p)"
		 "old=%p[%x,%x,%x], new=%p[%x,%x,%x]\n",
		 current_thread(), task,
		 old_region,
		 old_region ? old_region->fs_base : 0,
		 old_region ? old_region->system : 0,
		 old_region ? old_region->flags : 0,
		 shared_region,
		 shared_region ? shared_region->fs_base : 0,
		 shared_region ? shared_region->system : 0,
		 shared_region ? shared_region->flags : 0));

	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}
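/*
 * Sketch of the caller pattern used elsewhere in this file (see the
 * KERN_NO_SPACE recovery paths in copyin_shared_file() and
 * map_shared_file()); variable names here are illustrative only:
 *
 *	shared_region_mapping_t cur;
 *
 *	vm_get_shared_region(current_task(), &cur);	// no reference taken
 *	vm_set_shared_region(current_task(), new_sr);	// task now uses new_sr
 *
 * Neither call touches ref_count; callers in this file take their own
 * reference (shared_region_mapping_ref) on the region they install and
 * drop the reference they held on the region being replaced.
 */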
/*
 * shared_region_object_chain_detach:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
__private_extern__ void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}
/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just lookup the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
__private_extern__ kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t		target_region,
	shared_region_mapping_t		object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if(target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
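/*
 * Rough picture of the resulting lookup chain (illustration only):
 *
 *	target_region->object_chain -> object_ele
 *		object_ele->object_chain_region = older region (the "shadow")
 *		object_ele->depth               = older region's depth
 *
 * lsf_hash_lookup() starts at the newest region and follows object_chain
 * to the older region(s) when a file's mappings are not found there.
 */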
/* LP64todo - need 64-bit safe version */
__private_extern__ kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next,
	unsigned int		fs_base,
	unsigned int		system)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if(*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failed\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = fs_base;
	(*shared_region)->system = system;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
__private_extern__ kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	unsigned int		*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_set_alt_next(
	shared_region_mapping_t	shared_region,
	vm_offset_t		alt_next)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_set_alt_next"
			     "(shared_region=%p, alt_next=0%x)\n",
			     shared_region, alt_next));
	assert(shared_region->ref_count > 0);
	shared_region->alternate_next = alt_next;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if(shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
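/*
 * Minimal reference-counting sketch (assumes a valid region pointer):
 *
 *	shared_region_mapping_ref(sr);		// ref_count++
 *	...use sr...
 *	shared_region_mapping_dealloc(sr);	// ref_count--, frees at zero
 *
 * shared_region_mapping_dealloc() is the stub further below that always
 * passes both "need lock" flags; shared_region_mapping_dealloc_lock() is
 * the worker that actually walks and releases the region chain.
 */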
static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next = NULL;
	unsigned int				ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if((ref_count =
		    hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if(shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if(((vm_named_entry_t)
			    (shared_region->text_region->ip_kobject))
			   ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if(shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if((ref_count == 1) &&
			   (shared_region->flags & SHARED_REGION_SYSTEM)
			   && !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}
/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if(user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();

	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region,
	int			fs_base,
	int			system)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	vm_size_t		text_size;
	vm_size_t		data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size,
				&mapping_array);
	if(kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle, text_size,
					    data_handle, data_size,
					    mapping_array,
					    GLOBAL_SHARED_TEXT_SEGMENT,
					    shared_region,
					    SHARED_ALTERNATE_LOAD_BASE,
					    SHARED_ALTERNATE_LOAD_BASE,
					    fs_base, system);
	if(kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if(com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
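/*
 * Sketch of how a hypothetical caller would set up a private (non-default)
 * region with the routines above; names and error handling are illustrative:
 *
 *	shared_region_mapping_t private_region;
 *
 *	if (shared_file_create_system_region(&private_region,
 *					     fs_base, system) == KERN_SUCCESS) {
 *		// the new region starts with ref_count == 1
 *		vm_set_shared_region(task, private_region);
 *	}
 */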
/*
 * load a new default for a specified environment into the default share
 * regions list.  If a previous default exists for the environment specification
 * it is returned along with its reference.  It is expected that the new
 * system region structure passes a reference.
 */
shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if((old_system_region != NULL) &&
	   (old_system_region->fs_base == fs_base) &&
	   (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while(old_system_region->default_env_list != NULL) {
			if((old_system_region->default_env_list->fs_base == fs_base) &&
			   (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry */
	if(old_system_region) {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure
 */
shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while(system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if((system_region->fs_base == fs_base) &&
		   (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if(system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_system_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}
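/*
 * Sketch of the lookup/release pattern used by the KERN_NO_SPACE recovery
 * code later in this file:
 *
 *	system_region = lookup_default_shared_region(fs_base, system);
 *	if (system_region != NULL) {
 *		...				// returned with a reference held
 *		shared_region_mapping_dealloc_lock(system_region, 0, 1);
 *	}
 */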
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t	old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if(old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock, 0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while(old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if(old_system_region->default_env_list == system_region) {
			shared_region_mapping_t	dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock, 0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
__private_extern__ void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}
void
remove_all_shared_regions(void)
{
	shared_region_mapping_t	system_region;
	shared_region_mapping_t	next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if(system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while(system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi independent of the split libs       */
/* and so its policies have to be handled differently by the code that   */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both.                      */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if(com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if(com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code  */
	if((kret = shared_region_object_create(
		   com_region_size32,
		   &com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
	}
	if((kret = shared_region_object_create(
		   com_region_size64,
		   &com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
	}

	/* now set export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
					    com_region_size32,
					    NULL, 0, 0,
					    _COMM_PAGE_BASE_ADDRESS,
					    &com_mapping_resource,
					    0, 0,
					    ENV_DEFAULT_ROOT, cpu_type());
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}
void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	mach_port_t		text_region_handle;
	mach_port_t		data_region_handle;
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&text_region_handle,
			 text_region_size,
			 &data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(text_region_handle,
				     text_region_size,
				     data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE,
				     fs_base, system);

	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions. */
	if(old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if(com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.                           */

static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_size_t		data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(text_region_size,
					   text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(data_region_size,
					   data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;

	if(shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &map_addr,
				     data_table_size, 0, 0, &entry)
		   != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE,  b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   FALSE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ, &sfma_handle,
				       NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);

	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return KERN_SUCCESS;
}
static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header)
{
	vm_size_t		hash_table_size;
	vm_size_t		hash_table_offset;
	int			i;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;


	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: shared_file_header_init: "
				 "No memory for data table\n"));
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}
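/*
 * Illustrative arithmetic (assuming a 4096-byte PAGE_SIZE): each pass crams
 * at most 3 pages into lsf_zone, i.e. cram_size = 3 * 4096 = 12288 bytes,
 * which adds 12288 / sizeof(struct load_file_ele) entries to
 * shared_file_available_hash_ele and advances hash_cram_address by 12288.
 */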
/* A call made from user space, copyin_shared_file requires the user to  */
/* provide the address and size of a mapped file, the full path name of  */
/* that file and a list of offsets to be mapped into shared memory.      */
/* By requiring that the file be pre-mapped, copyin_shared_file can      */
/* guarantee that the file is neither deleted nor changed after the user */
/* begins the call.  */

kern_return_t
copyin_shared_file(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	int				map_cnt,
	sf_mapping_t			*mappings,
	memory_object_control_t		file_control,
	shared_region_task_mappings_t	sm_info,
	int				*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	SHARED_REGION_DEBUG(("copyin_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			mutex_unlock(&shared_file_header->lock);
			return ret;
		}
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t	mapped_object;
		if(entry->is_sub_map ||
		   entry->object.vm_object == VM_OBJECT_NULL) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while(mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if(file_object != mapped_object) {
			if(sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].file_offset,
				     shared_file_header->hash_size,
				     !alternate, alternate, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence. */

		/* If the file is being loaded in the alternate        */
		/* area, one load to alternate is allowed per mapped   */
		/* object the base address is passed back to the       */
		/* caller and the mappings field is filled in.  If the */
		/* caller does not pass the precise mappings_cnt       */
		/* and the Alternate is already loaded, an error       */
		/* is returned.  */
		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].mapping_offset)
			    & SHARED_DATA_REGION_MASK) !=
			   file_mapping->mapping_offset ||
			   mappings[i].size !=
			   file_mapping->size ||
			   mappings[i].file_offset !=
			   file_mapping->file_offset ||
			   mappings[i].protection !=
			   file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
			+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
			       mappings, map_cnt,
			       (void *)file_object,
			       *flags, sm_info);
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if(system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if(system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
extern void shared_region_dump_file_entry(
	int		trace_level,
	load_struct_t	*entry);	/* forward */

void shared_region_dump_file_entry(
	int		trace_level,
	load_struct_t	*entry)
{
	int			i;
	loaded_mapping_t	*mapping;

	if (trace_level > shared_region_trace_level) {
		return;
	}
	printf("shared region: %p: "
	       "file_entry %p base_address=0x%x file_offset=0x%x "
	       "%d mappings\n",
	       current_thread(), entry,
	       entry->base_address, entry->file_offset, entry->mapping_cnt);
	mapping = entry->mappings;
	for (i = 0; i < entry->mapping_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "offset=0x%x size=0x%x file_offset=0x%x prot=%d\n",
		       current_thread(), i,
		       mapping->mapping_offset,
		       mapping->size,
		       mapping->file_offset,
		       mapping->protection);
		mapping = mapping->next;
	}
}
extern void shared_region_dump_mappings(
	int				trace_level,
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	mach_vm_offset_t		base_offset);	/* forward */

void shared_region_dump_mappings(
	int				trace_level,
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	mach_vm_offset_t		base_offset)
{
	int	i;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	printf("shared region: %p: %d mappings base_offset=0x%llx\n",
	       current_thread(), map_cnt, (uint64_t) base_offset);
	for (i = 0; i < map_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "addr=0x%llx, size=0x%llx, file_offset=0x%llx, "
		       "prot=(%d,%d)\n",
		       current_thread(), i,
		       (uint64_t) mappings[i].sfm_address,
		       (uint64_t) mappings[i].sfm_size,
		       (uint64_t) mappings[i].sfm_file_offset,
		       mappings[i].sfm_max_prot,
		       mappings[i].sfm_init_prot);
	}
}
extern void shared_region_dump_conflict_info(
	int		trace_level,
	vm_map_t	map,
	vm_map_offset_t	offset,
	vm_map_size_t	size);	/* forward */

void
shared_region_dump_conflict_info(
	int		trace_level,
	vm_map_t	map,
	vm_map_offset_t	offset,
	vm_map_size_t	size)
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	memory_object_t	mem_object;
	kern_return_t	kr;
	char		*filename;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	object = VM_OBJECT_NULL;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, offset, &entry)) {
		entry = entry->vme_next;
	}

	if (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			printf("shared region: %p: conflict with submap "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		object = entry->object.vm_object;
		if (object == VM_OBJECT_NULL) {
			printf("shared region: %p: conflict with NULL object "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			object = VM_OBJECT_NULL;
			goto done;
		}

		vm_object_lock(object);
		while (object->shadow != VM_OBJECT_NULL) {
			vm_object_t	shadow;

			shadow = object->shadow;
			vm_object_lock(shadow);
			vm_object_unlock(object);
			object = shadow;
		}

		if (object->internal) {
			printf("shared region: %p: conflict with anonymous "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}
		if (! object->pager_ready) {
			printf("shared region: %p: conflict with uninitialized "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		mem_object = object->pager;

		/*
		 * XXX FBDP: "!internal" doesn't mean it's a vnode pager...
		 */
		kr = vnode_pager_get_object_filename(mem_object,
						     &filename);
		if (kr != KERN_SUCCESS) {
			filename = NULL;
		}
		printf("shared region: %p: conflict with '%s' "
		       "at 0x%llx size 0x%llx\n",
		       current_thread(),
		       filename ? filename : "<unknown>",
		       (uint64_t) offset,
		       (uint64_t) size);
	}
done:
	if (object != VM_OBJECT_NULL) {
		vm_object_unlock(object);
	}
	vm_map_unlock_read(map);
}
/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the
 * mappings are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: map_shared_file: "
				 "shared_file_header_init() failed kr=0x%x\n",
				 current_thread(), ret));
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence. */

		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i >= map_cnt) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: map_shared_file: "
					 "already mapped with "
					 "more than %d mappings\n",
					 current_thread(), map_cnt));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].sfm_address)
			    & SHARED_DATA_REGION_MASK) !=
			   file_mapping->mapping_offset ||
			   mappings[i].sfm_size != file_mapping->size ||
			   mappings[i].sfm_file_offset != file_mapping->file_offset ||
			   mappings[i].sfm_init_prot != file_mapping->protection) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "mapping #%d differs\n",
					 current_thread(), i));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i != map_cnt) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: map_shared_file: "
				 "already mapped with "
				 "%d mappings instead of %d\n",
				 current_thread(), i, map_cnt));
			shared_region_dump_file_entry(
				SHARED_REGION_TRACE_INFO,
				file_entry);
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);

			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "map_shared_file: already mapped, "
					 "would need to slide 0x%llx\n",
					 current_thread(), slide));
			} else {
				/*
				 * The file is already mapped at the correct
				 * address, no sliding needed.
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_map(mappings, map_cnt,
			      (void *)file_control,
			      file_size,
			      sm_info,
			      base_offset,
			      slide_p);
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	return KERN_SUCCESS;
}
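/*
 * Illustrative example (hypothetical numbers): with client_base 0x90000000,
 * text_size 0x10000000 and two page-aligned ranges to keep,
 * {0x90000000, 0x2000} and {0x90100000, 0x1000}, the selection sort above
 * orders them by address and the loop then deallocates the hole
 * [0x90002000, 0x90100000), everything from 0x90101000 to the end of the
 * text submap, and finally the entire data submap, leaving only the two
 * requested ranges mapped.
 */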
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.  */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int)file_object) &&
		    (entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while(target_region) {
				if((!(sm_info->self)) ||
				   ((target_region == entry->regions_instance) &&
				    (target_region->depth >= entry->depth))) {
					if(alternate &&
					   entry->base_address >= sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "alt=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   alternate, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
					if (regular &&
					    entry->base_address < sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "reg=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   regular, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
				}
				if(target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p NOT FOUND\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));
	return (load_struct_t *)0;
}
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int				need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for(i = 0; i<shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance, region));
			}
			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));

	return NULL;	/* XXX */
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
__private_extern__ load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list (list of loaded extents) for a file from */
/* the loaded file hash table.                                  */
static load_struct_t *
lsf_hash_delete(
	load_struct_t	*target_entry,	/* optional: NULL if not relevant */
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(target=%p,file=%p,base=0x%x,sm_info=%p)\n",
		   target_entry, file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if ((!(sm_info->self)) || ((shared_region_mapping_t)
			sm_info->self == entry->regions_instance)) {
			if ((target_entry == NULL ||
			     entry == target_entry) &&
			    (entry->file_object == (int) file_object) &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete: not found\n"));
	return (load_struct_t *)0;
}
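/*
 * Note: lsf_hash_delete() matches on the file object and base address (and,
 * when "target_entry" is non-NULL, on that exact entry), restricted to the
 * current region instance when sm_info->self is set.  The matching entry is
 * only unlinked from its bucket here; freeing it is left to the caller.
 */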
/* Inserts a new map_list (list of loaded file extents) into the */
/* server's loaded file hash table.                               */
static void
lsf_hash_insert(
	load_struct_t	*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t	*shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}
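/*
 * Note: the bucket is picked by load_file_hash() on the entry's file object,
 * so later lookups and deletions for the same file land in the same chain.
 * No locking is done here; the insertion appears to rely on the caller
 * already holding the shared file header lock.
 */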
/* Looks up the file type requested.  If it is already loaded and the  */
/* file extents are an exact match, returns success.  If it is not     */
/* loaded, attempts to load the file extents at the given offsets.  If */
/* any extent fails to load, or if the file was already loaded in a    */
/* different configuration, lsf_load fails.                            */
static kern_return_t
lsf_load(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	sf_mapping_t	*mappings,
	int		map_cnt,
	void		*file_object,
	int		flags,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	LSF_DEBUG(("lsf_load"
		   "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p)"
		   "\n",
		   mapped_file_size, *base_address, map_cnt, file_object,
		   flags, sm_info));
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_load: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_load"
		   "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p) "
		   "entry=%p\n",
		   mapped_file_size, *base_address, map_cnt, file_object,
		   flags, sm_info, entry));
	if (entry == NULL) {
		printf("lsf_load: unable to allocate memory\n");
		return KERN_NO_SPACE;
	}

	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].file_offset;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);
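	/*
	 * Bookkeeping so far: one load_struct_t has been allocated from
	 * lsf_zone (debiting shared_file_available_hash_ele), stamped with
	 * the file object, region instance and depth, and inserted into the
	 * hash table.  "tptr" points at the list head so the code below can
	 * append one loaded_mapping_t per extent as it is established.
	 */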
2163 alternate_load_next
= sm_info
->alternate_next
;
2164 original_alt_load_next
= alternate_load_next
;
2165 if (flags
& ALTERNATE_LOAD_SITE
) {
2166 vm_offset_t max_loadfile_offset
;
2168 *base_address
= ((*base_address
) & ~SHARED_TEXT_REGION_MASK
) +
2169 sm_info
->alternate_next
;
2170 max_loadfile_offset
= 0;
2171 for(i
= 0; i
<map_cnt
; i
++) {
2172 if(((mappings
[i
].mapping_offset
2173 & SHARED_TEXT_REGION_MASK
)+ mappings
[i
].size
) >
2174 max_loadfile_offset
) {
2175 max_loadfile_offset
=
2176 (mappings
[i
].mapping_offset
2177 & SHARED_TEXT_REGION_MASK
)
2181 if((alternate_load_next
+ round_page(max_loadfile_offset
)) >=
2182 (sm_info
->data_size
- (sm_info
->data_size
>>9))) {
2183 entry
->base_address
=
2184 (*base_address
) & SHARED_TEXT_REGION_MASK
;
2185 lsf_unload(file_object
, entry
->base_address
, sm_info
);
2187 return KERN_NO_SPACE
;
2189 alternate_load_next
+= round_page(max_loadfile_offset
);
2192 if (((*base_address
) & SHARED_TEXT_REGION_MASK
) >
2193 sm_info
->alternate_base
) {
2194 entry
->base_address
=
2195 (*base_address
) & SHARED_TEXT_REGION_MASK
;
2196 lsf_unload(file_object
, entry
->base_address
, sm_info
);
2197 return KERN_INVALID_ARGUMENT
;
2201 entry
->base_address
= (*base_address
) & SHARED_TEXT_REGION_MASK
;
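	/*
	 * For an ALTERNATE_LOAD_SITE load, the requested base address is
	 * rebased onto sm_info->alternate_next and the file's span (the
	 * largest mapping_offset + size within the text region) decides how
	 * far alternate_next must advance; the load is refused with
	 * KERN_NO_SPACE once that would run into the last 1/512th of the
	 * data region.  As an illustration (made-up numbers, assuming 4KB
	 * pages): with alternate_next at 0x06000000 and a largest extent
	 * ending at 0x42000, the next alternate load would start at
	 * 0x06042000.  Non-alternate loads must instead stay below
	 * sm_info->alternate_base.
	 */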
	// Sanity check the mappings -- make sure we don't stray across the
	// alternate boundary.  If any bit of a library that we're not trying
	// to load in the alternate load space strays across that boundary,
	// return KERN_INVALID_ARGUMENT immediately so that the caller can
	// try to load it in the alternate shared area.  We do this to avoid
	// a nasty case: if a library tries to load so that it crosses the
	// boundary, it'll occupy a bit of the alternate load area without
	// the kernel being aware.  When loads into the alternate load area
	// at the first free address are tried, the load will fail.
	// Thus, a single library straddling the boundary causes all sliding
	// libraries to fail to load.  This check will avoid such a case.

	if (!(flags & ALTERNATE_LOAD_SITE)) {
		for (i = 0; i < map_cnt; i++) {
			vm_offset_t	region_mask;
			vm_address_t	region_start;
			vm_address_t	region_end;

			if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
				// mapping offsets are relative to start of shared segments.
				region_mask = SHARED_TEXT_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask) + entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					// No library is permitted to load so that any bit of it is in the
					// shared alternate space.  If they want it loaded, they can put
					// it in the alternate space explicitly.
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					lsf_unload(file_object, entry->base_address, sm_info);
					return KERN_INVALID_ARGUMENT;
				}
			} else {
				region_mask = SHARED_DATA_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask) + entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					lsf_unload(file_object, entry->base_address, sm_info);
					return KERN_INVALID_ARGUMENT;
				}
			}
		}
	} // if not alternate load site.
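	/*
	 * Both the read-only (text) and writable (data) extents are checked
	 * against SHARED_ALTERNATE_LOAD_BASE above, so a non-sliding library
	 * that would spill into the alternate area is rejected before any of
	 * its pages are mapped.
	 */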
	/* copyin mapped file data */
	for (i = 0; i < map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if (mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if (mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
				mappings[i].size) >
				(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if (vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, VM_FLAGS_FIXED)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if (!(mappings[i].protection & VM_PROT_ZF)) {
			if (vm_map_copyin(current_map(),
				(vm_map_address_t)(mapped_file + mappings[i].file_offset),
				vm_map_round_page(mappings[i].size), FALSE, &copy_object)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
						->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if (vm_map_copy_overwrite(((vm_named_entry_t)
				local_map->ip_kobject)->backing.map,
				(vm_map_address_t)target_address,
				copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}

		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_unload(file_object, entry->base_address, sm_info);
			printf("lsf_load: unable to allocate memory\n");
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
						& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_load: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);

		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(
		(shared_region_mapping_t) sm_info->self,
		alternate_load_next);
	LSF_DEBUG(("lsf_load: done\n"));
	return KERN_SUCCESS;
}
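/*
 * Summary of the loop above: for each extent, the target range is reserved
 * with vm_allocate() in the text or data submap, the file data is copied in
 * with vm_map_copyin()/vm_map_copy_overwrite() unless the extent is
 * zero-fill, a loaded_mapping_t is recorded, and the range's protections are
 * then clamped to the extent's protection masked with
 * VM_PROT_READ | VM_PROT_EXECUTE.  The disabled sketch below (illustrative
 * only, not part of the original source) shows the kind of mapping list a
 * caller might describe: one read/execute text extent and one copy-on-write
 * data extent whose offset satisfies the 0x10000000 segment check above.
 * Only the sf_mapping_t fields referenced in lsf_load() are filled in.
 */
#if 0
	sf_mapping_t example_mappings[2] = {
		{ .mapping_offset = 0x00000000,
		  .size           = 0x5000,
		  .file_offset    = 0x0,
		  .protection     = VM_PROT_READ | VM_PROT_EXECUTE },
		{ .mapping_offset = 0x10000000,
		  .size           = 0x1000,
		  .file_offset    = 0x5000,
		  .protection     = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COW },
	};
#endif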
/*
 * Look in the shared region, starting from the end, for a place to fit all the
 * mappings while respecting their relative offsets.
 */
static kern_return_t
lsf_slide(
	unsigned int	map_cnt,
	struct shared_file_mapping_np	*mappings_in,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t	*base_offset_p)
{
	mach_vm_offset_t	max_mapping_offset;
	int			i;
	vm_map_entry_t		map_entry, prev_entry, next_entry;
	mach_vm_offset_t	prev_hole_start, prev_hole_end;
	mach_vm_offset_t	mapping_offset, mapping_end_offset;
	mach_vm_offset_t	base_offset;
	mach_vm_size_t		mapping_size;
	mach_vm_offset_t	wiggle_room, wiggle;
	vm_map_t		text_map, data_map, map;
	vm_named_entry_t	region_entry;
	ipc_port_t		region_handle;
	kern_return_t		kr;

	struct shared_file_mapping_np	*mappings, tmp_mapping;
	unsigned int		sort_index, sorted_index;
	vm_map_offset_t		sort_min_address;
	unsigned int		sort_min_index;

	/*
	 * Sort the mappings array, so that we can try and fit them in
	 * the right order as we progress along the VM maps.
	 *
	 * We can't modify the original array (the original order is
	 * important when doing lookups of the mappings), so copy it first.
	 */

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &mappings,
			(vm_size_t) (map_cnt * sizeof (mappings[0])));
	if (kr != KERN_SUCCESS) {
		return KERN_NO_SPACE;
	}

	bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));
	max_mapping_offset = 0;
	for (sorted_index = 0;
	     sorted_index < map_cnt;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		mapping_end_offset = ((mappings[sort_min_index].sfm_address &
				       SHARED_TEXT_REGION_MASK) +
				      mappings[sort_min_index].sfm_size);
		sort_min_address = mapping_end_offset;
		/* compute the highest mapping_offset as well... */
		if (mapping_end_offset > max_mapping_offset) {
			max_mapping_offset = mapping_end_offset;
		}
		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < map_cnt;
		     sort_index++) {

			mapping_end_offset =
				((mappings[sort_index].sfm_address &
				  SHARED_TEXT_REGION_MASK) +
				 mappings[sort_index].sfm_size);

			if (mapping_end_offset < sort_min_address) {
				/* lowest mapping_offset so far... */
				sort_min_index = sort_index;
				sort_min_address = mapping_end_offset;
			}
		}
		if (sort_min_index != sorted_index) {
			tmp_mapping = mappings[sort_min_index];
			mappings[sort_min_index] = mappings[sorted_index];
			mappings[sorted_index] = tmp_mapping;
		}
	}

	max_mapping_offset = vm_map_round_page(max_mapping_offset);
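	/*
	 * The loop above is a simple selection sort on the copied mappings
	 * array, keyed on each mapping's end offset within the text region,
	 * while also remembering the farthest end in max_mapping_offset
	 * (page-rounded just above).  map_cnt is typically just a handful of
	 * segments per library, so the quadratic sort is not a concern.
	 */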
	/* start from the end of the shared area */
	base_offset = sm_info->text_size;

	/* can all the mappings fit ? */
	if (max_mapping_offset > base_offset) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
			  map_cnt * sizeof (mappings[0]));
		return KERN_FAILURE;
	}

	/*
	 * Align the last mapping to the end of the submaps
	 * and start from there.
	 */
	base_offset -= max_mapping_offset;

	region_handle = (ipc_port_t) sm_info->text_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_map = region_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_map = region_entry->backing.map;

	vm_map_lock_read(text_map);
	vm_map_lock_read(data_map);

start_over:
	/*
	 * At first, we can wiggle all the way from our starting point
	 * (base_offset) towards the start of the map (0), if needed.
	 */
	wiggle_room = base_offset;
	for (i = (signed) map_cnt - 1; i >= 0; i--) {
		if (mappings[i].sfm_size == 0) {
			/* nothing to map here... */
			continue;
		}
		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			/* copy-on-write mappings are in the data submap */
			map = data_map;
		} else {
			/* other mappings are in the text submap */
			map = text_map;
		}
		/* get the offset within the appropriate submap */
		mapping_offset = (mappings[i].sfm_address &
				  SHARED_TEXT_REGION_MASK);
		mapping_size = mappings[i].sfm_size;
		mapping_end_offset = mapping_offset + mapping_size;
		mapping_offset = vm_map_trunc_page(mapping_offset);
		mapping_end_offset = vm_map_round_page(mapping_end_offset);
		mapping_size = mapping_end_offset - mapping_offset;

		while (TRUE) {
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * currently mapped: good.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}

			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need.  Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there: wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;
		} /* while we look for a hole */
	} /* for each mapping */

	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}
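/*
 * lsf_slide() starts the candidate base at text_size - max_mapping_offset
 * and "wiggles" it towards lower addresses whenever a mapping collides with
 * an existing entry in the text or data submap.  wiggle_room caps how far
 * the base may move before mappings placed earlier in the scan would be
 * invalidated, in which case the whole scan restarts from the new base.  On
 * success the chosen base is returned through base_offset_p; both submaps
 * are only read-locked for the duration of the search.
 */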
/*
 * Attempt to establish the mappings for a split library into the shared region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	memory_object_control_t		file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;
	mach_vm_size_t		total_size;

	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object, sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object, sm_info, entry));
	if (entry == NULL) {
		SHARED_REGION_TRACE(
			SHARED_REGION_TRACE_ERROR,
			("shared_region: %p: "
			 "lsf_map: unable to allocate entry\n",
			 current_thread()));
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;
	total_size = 0;
	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_ERROR,
					("shared_region: %p: lsf_map: "
					 "RW mapping #%d not in segment",
					 current_thread(), i));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_ERROR,
					mappings, map_cnt, base_offset);

				lsf_deallocate(entry,
					       file_object,
					       entry->base_address,
					       sm_info,
					       TRUE);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_ERROR,
					("shared_region: %p: lsf_map: "
					 "RO mapping #%d not in segment",
					 current_thread(), i));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_ERROR,
					mappings, map_cnt, base_offset);

				lsf_deallocate(entry,
					       file_object,
					       entry->base_address,
					       sm_info,
					       TRUE);
				return KERN_INVALID_ARGUMENT;
			}
		}

		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     file_size)) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: lsf_map: "
				 "ZF mapping #%d beyond EOF",
				 current_thread(), i));
			shared_region_dump_mappings(SHARED_REGION_TRACE_ERROR,
						    mappings, map_cnt,
						    base_offset);

			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);
		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}
		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		total_size += mappings[i].sfm_size;
		if (mappings[i].sfm_size == 0) {
			/* nothing to map... */
			kr = KERN_SUCCESS;
		} else {
			kr = mach_vm_map(
				region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT);
		}

		if (kr != KERN_SUCCESS) {
			vm_offset_t old_base_address;

			old_base_address = entry->base_address;
			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			entry = NULL;

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: lsf_map: "
					 "mapping #%d failed to map, "
					 "kr=0x%x, sliding...\n",
					 current_thread(), i, kr));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);
				shared_region_dump_conflict_info(
					SHARED_REGION_TRACE_CONFLICT,
					region_entry->backing.map,
					(old_base_address +
					 ((mappings[i].sfm_address)
					  & region_mask)),
					vm_map_round_page(mappings[i].sfm_size));

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: lsf_map: "
				 "mapping #%d failed to map, "
				 "kr=0x%x, no sliding\n",
				 current_thread(), i, kr));
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);
			shared_region_dump_conflict_info(
				SHARED_REGION_TRACE_CONFLICT,
				region_entry->backing.map,
				(old_base_address +
				 ((mappings[i].sfm_address)
				  & region_mask)),
				vm_map_round_page(mappings[i].sfm_size));
			return KERN_FAILURE;
		}
		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: "
				 "lsf_map: unable to allocate mapping\n",
				 current_thread()));
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
						& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	}

	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if ((sm_info->flags & SHARED_REGION_STANDALONE) ||
	    (total_size == 0)) {
		/*
		 * Two cases:
		 * 1. we have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping.  Just deallocate it all.
		 * 2. the total size of the mappings is 0, so nothing at all
		 * was mapped.  Let's not waste kernel resources to describe
		 * nothing.
		 *
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(entry, file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}
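/*
 * lsf_map() summary: each mapping is first validated against its segment
 * (copy-on-write extents must fall entirely in the data segment at
 * 0x10000000, everything else in the text segment, and non-zero-fill
 * extents must not extend past the end of the file).  Zero-fill extents are
 * mapped with a null memory object port, file-backed extents against the
 * file's pager.  When a mapping conflicts and the caller passed a non-NULL
 * slide_p, lsf_slide() is asked for a new base and the whole file is
 * retried via restart_after_slide; the resulting displacement is reported
 * through *slide_p.
 */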
/* Finds the file_object extent list in the shared memory hash table.       */
/* If one is found, the associated extents in shared memory are deallocated */
/* and the extent list is freed.                                             */
static void
lsf_unload(
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(NULL, file_object, base_offset, sm_info, TRUE);
}
/*
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocates the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	load_struct_t	*target_entry,
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t	unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;
	kern_return_t		kr;

	LSF_DEBUG(("lsf_deallocate(target=%p,file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   target_entry, file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(target_entry,
				file_object,
				base_offset,
				sm_info);
	if (entry) {
		map_ele = entry->mappings;
		while (map_ele != NULL) {
			if (unload) {
				ipc_port_t		region_handle;
				vm_named_entry_t	region_entry;

				if (map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				kr = vm_deallocate(region_entry->backing.map,
						   (entry->base_address +
						    map_ele->mapping_offset),
						   map_ele->size);
				assert(kr == KERN_SUCCESS);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p\n", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_deallocate: done\n"));
}
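/*
 * lsf_deallocate() first unlinks the load_struct_t via lsf_hash_delete(),
 * then walks its loaded_mapping_t list, vm_deallocate()'ing the backing
 * submap ranges only when "unload" is TRUE, and returns every freed element
 * to lsf_zone, crediting shared_file_available_hash_ele as it goes.
 */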
/* Returns an integer from 0 to 100 representing how full the */
/* shared file mapping pool is, as a percentage.               */
__private_extern__ unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100) / lsf_zone->max_size;
}
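/*
 * Worked example (illustrative numbers only): with 1000 elements in use,
 * an element size of 64 bytes and a zone max_size of 262144 bytes, the
 * gauge reads (1000 * 64 * 100) / 262144 = 24, i.e. the pool is about 24%
 * full.
 */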