/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 *
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */

#include <debug.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

int shared_region_trace_level = SHARED_REGION_TRACE_ERROR;

#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)			\
	MACRO_BEGIN			\
	if (lsf_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#define LSF_ALLOC_DEBUG(args)		\
	MACRO_BEGIN			\
	if (lsf_alloc_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#else	/* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif	/* DEBUG */

/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock);


static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	load_struct_t			*target_entry,	/* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	load_struct_t			*target_entry,	/* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload);


#define	load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)

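/*
 * For example, with file_object == 0x04c3f2a0 and a 2048-bucket table,
 * load_file_hash() masks off the low 24 bits (0xc3f2a0) and reduces them
 * modulo 2048, selecting bucket 0x2a0.  The mask keeps the hash in the
 * pointer bits that actually vary between kernel objects.
 */
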
/* Implementation */
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)


ipc_port_t	sfma_handle = NULL;
zone_t		lsf_zone;

int shared_file_available_hash_ele;

/* com region support */
ipc_port_t	com_region_handle32 = NULL;
ipc_port_t	com_region_handle64 = NULL;
vm_map_t	com_region_map32 = NULL;
vm_map_t	com_region_map64 = NULL;
vm_size_t	com_region_size32 = _COMM_PAGE32_AREA_LENGTH;
vm_size_t	com_region_size64 = _COMM_PAGE64_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;


#if DEBUG
int shared_region_debug = 0;
#endif /* DEBUG */


kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}

kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	shared_region_mapping_t old_region;

	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p[%x,%x,%x])\n",
			     task, shared_region,
			     shared_region ? shared_region->fs_base : 0,
			     shared_region ? shared_region->system : 0,
			     shared_region ? shared_region->flags : 0));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}

	old_region = task->system_shared_region;
	SHARED_REGION_TRACE(
		SHARED_REGION_TRACE_INFO,
		("shared_region: %p set_region(task=%p) "
		 "old=%p[%x,%x,%x], new=%p[%x,%x,%x]\n",
		 current_thread(), task,
		 old_region,
		 old_region ? old_region->fs_base : 0,
		 old_region ? old_region->system : 0,
		 old_region ? old_region->flags : 0,
		 shared_region,
		 shared_region ? shared_region->fs_base : 0,
		 shared_region ? shared_region->system : 0,
		 shared_region ? shared_region->flags : 0));

	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}

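/*
 * Neither vm_get_shared_region() nor vm_set_shared_region() manipulates
 * ref_count, so a caller swapping a task's region manages the references
 * itself: take one on the incoming region, install it, then drop the
 * reference the task held on the old one.  Illustrative sketch, where
 * "new_region" is any region the caller has created or looked up:
 *
 *	shared_region_mapping_t old_region;
 *
 *	vm_get_shared_region(task, &old_region);
 *	shared_region_mapping_ref(new_region);
 *	vm_set_shared_region(task, new_region);
 *	shared_region_mapping_dealloc(old_region);
 *
 * Compare the KERN_NO_SPACE recovery path in map_shared_file() below.
 */
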
/*
 * shared_region_object_chain_detached:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}

/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just lookup the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t		target_region,
	shared_region_mapping_t		object_chain_region)
{
	shared_region_object_chain_t object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p)\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if (target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}

/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next,
	int			fs_base,
	int			system)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if (*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failure\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = fs_base;
	(*shared_region)->system = system;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}

/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}

kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if (shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings sm_info;
	shared_region_mapping_t next = NULL;
	unsigned int ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if (shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if (((vm_named_entry_t)
			     (shared_region->text_region->ip_kobject))
			    ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if (shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if ((ref_count == 1) &&
			    (shared_region->flags & SHARED_REGION_SYSTEM) &&
			    !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}

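/*
 * Reference-counting sketch: shared_region_mapping_create() returns a
 * region with ref_count == 1; each shared_region_mapping_ref() adds a
 * reference and each shared_region_mapping_dealloc() drops one.  The
 * structure is freed (and the deallocation cascades down the object
 * chain) only when the count reaches zero:
 *
 *	shared_region_mapping_t region;
 *
 *	shared_file_create_system_region(&region, fs_base, system);
 *	shared_region_mapping_ref(region);	ref_count: 1 -> 2
 *	shared_region_mapping_dealloc(region);	ref_count: 2 -> 1
 *	shared_region_mapping_dealloc(region);	ref_count: 1 -> 0, freed
 */
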
/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}

static
kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if (user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();


	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}

/* called for the non-default, private branch shared region support */
/* the system default fields (fs_base and system) are not relevant, */
/* as the system default flag is not set			     */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region,
	int			fs_base,
	int			system)
{
	ipc_port_t	text_handle;
	ipc_port_t	data_handle;
	long		text_size;
	long		data_size;
	vm_offset_t	mapping_array;
	kern_return_t	kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size,
				&mapping_array);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle, text_size,
					    data_handle, data_size,
					    mapping_array,
					    GLOBAL_SHARED_TEXT_SEGMENT,
					    shared_region,
					    SHARED_ALTERNATE_LOAD_BASE,
					    SHARED_ALTERNATE_LOAD_BASE,
					    fs_base,
					    system);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if (com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}

/*
 * load a new default for a specified environment into the default share
 * regions list.  If a previous default exists for the environment
 * specification, it is returned along with its reference.  It is expected
 * that the new system region structure passes in a reference.
 */

shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if ((old_system_region != NULL) &&
	    (old_system_region->fs_base == fs_base) &&
	    (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while (old_system_region->default_env_list != NULL) {
			if ((old_system_region->default_env_list->fs_base == fs_base) &&
			    (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry				*/
	if (old_system_region) {
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}

/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure.
 */

shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t system_region;
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while (system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if ((system_region->fs_base == fs_base) &&
		    (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if (system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}

/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */

__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if (old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock,
						   0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while (old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if (old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock,
							   0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}

void
remove_all_shared_regions(void)
{
	shared_region_mapping_t system_region;
	shared_region_mapping_t next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if (system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while (system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}

/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi-independent of the split libs	  */
/* and so its policies have to be handled differently by the code that	  */
/* manipulates the mapping of shared region environments.  However,	  */
/* the shared region delivery system supports both.			  */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if (com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if (com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code */
	if ((kret = shared_region_object_create(
		     com_region_size32,
		     &com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
		return;
	}
	if ((kret = shared_region_object_create(
		     com_region_size64,
		     &com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
		return;
	}

	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
					    com_region_size32,
					    NULL, 0, 0,
					    _COMM_PAGE_BASE_ADDRESS,
					    &com_mapping_resource,
					    0, 0,
					    ENV_DEFAULT_ROOT, cpu_type());
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}

void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	mach_port_t		text_region_handle;
	mach_port_t		data_region_handle;
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&text_region_handle,
			 text_region_size,
			 &data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(text_region_handle,
				     text_region_size,
				     data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE,
				     fs_base, system);

	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions.					 */
	if (old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if (com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}


/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.			      */


static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_size_t		data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
		text_region_size,
		text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
		data_region_size,
		data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;

	if (shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if (vm_map_find_space(kernel_map, &map_addr,
				      data_table_size, 0, 0, &entry)
		    != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   TRUE);
		}


		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;


		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ,
				       &sfma_handle,
				       NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);

	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return KERN_SUCCESS;
}

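/*
 * Sizing example for shared_file_init(): with the 0x10000000 (256 MB)
 * regions used at boot,
 *	data_table_size = 0x10000000 >> 9  = 0x80000 (512 KB)
 *	hash_size       = 0x10000000 >> 14 = 0x4000  (16 KB)
 * i.e. the mapping table is 1/512th of the region it describes and the
 * hash area 1/16384th.
 */
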
static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header)
{
	vm_size_t	hash_table_size;
	vm_size_t	hash_table_offset;
	int		i;
	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our */
	/* zone.						      */
	static int allocable_hash_pages;
	static vm_offset_t hash_cram_address;


	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: shared_file_header_init: "
				 "No memory for data table\n"));
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}

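/*
 * Note on the pool arithmetic above: since sizeof(queue_head_t) equals
 * sizeof(struct queue_entry), hash_table_size is the same 0x4000 bytes
 * as shared_file_init()'s hash_size, and (hash_table_size << 5) recovers
 * data_table_size (0x4000 << 5 == 0x80000).  So allocable_hash_pages is
 * simply whatever is left of the data table after the hash buckets and
 * the rounded sf_mapping header, the same space that shared_file_init()
 * handed to zinit() for the lsf_zone.
 */
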
extern void shared_region_dump_file_entry(
	int trace_level,
	load_struct_t *entry);	/* forward */

void shared_region_dump_file_entry(
	int trace_level,
	load_struct_t *entry)
{
	int i;
	loaded_mapping_t *mapping;

	if (trace_level > shared_region_trace_level) {
		return;
	}
	printf("shared region: %p: "
	       "file_entry %p base_address=0x%x file_offset=0x%x "
	       "%d mappings\n",
	       current_thread(), entry,
	       entry->base_address, entry->file_offset, entry->mapping_cnt);
	mapping = entry->mappings;
	for (i = 0; i < entry->mapping_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "offset=0x%x size=0x%x file_offset=0x%x prot=%d\n",
		       current_thread(),
		       i,
		       mapping->mapping_offset,
		       mapping->size,
		       mapping->file_offset,
		       mapping->protection);
		mapping = mapping->next;
	}
}

extern void shared_region_dump_mappings(
	int trace_level,
	struct shared_file_mapping_np *mappings,
	int map_cnt,
	mach_vm_offset_t base_offset);	/* forward */

void shared_region_dump_mappings(
	int trace_level,
	struct shared_file_mapping_np *mappings,
	int map_cnt,
	mach_vm_offset_t base_offset)
{
	int i;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	printf("shared region: %p: %d mappings base_offset=0x%llx\n",
	       current_thread(), map_cnt, (uint64_t) base_offset);
	for (i = 0; i < map_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "addr=0x%llx, size=0x%llx, file_offset=0x%llx, "
		       "prot=(%d,%d)\n",
		       current_thread(),
		       i,
		       (uint64_t) mappings[i].sfm_address,
		       (uint64_t) mappings[i].sfm_size,
		       (uint64_t) mappings[i].sfm_file_offset,
		       mappings[i].sfm_max_prot,
		       mappings[i].sfm_init_prot);
	}
}

extern void shared_region_dump_conflict_info(
	int trace_level,
	vm_map_t map,
	vm_map_offset_t offset,
	vm_map_size_t size);	/* forward */

void
shared_region_dump_conflict_info(
	int trace_level,
	vm_map_t map,
	vm_map_offset_t offset,
	vm_map_size_t size)
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	memory_object_t	mem_object;
	kern_return_t	kr;
	char		*filename;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	object = VM_OBJECT_NULL;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, offset, &entry)) {
		entry = entry->vme_next;
	}

	if (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			printf("shared region: %p: conflict with submap "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		object = entry->object.vm_object;
		if (object == VM_OBJECT_NULL) {
			printf("shared region: %p: conflict with NULL object "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			object = VM_OBJECT_NULL;
			goto done;
		}

		vm_object_lock(object);
		while (object->shadow != VM_OBJECT_NULL) {
			vm_object_t shadow;

			shadow = object->shadow;
			vm_object_lock(shadow);
			vm_object_unlock(object);
			object = shadow;
		}

		if (object->internal) {
			printf("shared region: %p: conflict with anonymous "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}
		if (! object->pager_ready) {
			printf("shared region: %p: conflict with uninitialized "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		mem_object = object->pager;

		/*
		 * XXX FBDP: "!internal" doesn't mean it's a vnode pager...
		 */
		kr = vnode_pager_get_object_filename(mem_object,
						     &filename);
		if (kr != KERN_SUCCESS) {
			filename = NULL;
		}
		printf("shared region: %p: conflict with '%s' "
		       "at 0x%llx size 0x%llx\n",
		       current_thread(),
		       filename ? filename : "<unknown>",
		       (uint64_t) offset,
		       (uint64_t) size);
	}
done:
	if (object != VM_OBJECT_NULL) {
		vm_object_unlock(object);
	}
	vm_map_unlock_read(map);
}

/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the
 * mappings are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up	    */
	/* mappings based on the file object				    */

	if (shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: map_shared_file: "
				 "shared_file_header_init() failed kr=0x%x\n",
				 current_thread(), ret));
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object						  */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for	    */
		/* semantic equivalence.				    */

		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: map_shared_file: "
					 "already mapped with "
					 "more than %d mappings\n",
					 current_thread(), map_cnt));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].sfm_address)
			     & SHARED_DATA_REGION_MASK) !=
			    file_mapping->mapping_offset ||
			    mappings[i].sfm_size != file_mapping->size ||
			    mappings[i].sfm_file_offset != file_mapping->file_offset ||
			    mappings[i].sfm_init_prot != file_mapping->protection) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "mapping #%d differs\n",
					 current_thread(), i));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if (i != map_cnt) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: map_shared_file: "
				 "already mapped with "
				 "%d mappings instead of %d\n",
				 current_thread(), i, map_cnt));
			shared_region_dump_file_entry(
				SHARED_REGION_TRACE_INFO,
				file_entry);
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);

			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * We fail.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "map_shared_file: already mapped, "
					 "would need to slide 0x%llx\n",
					 current_thread(),
					 slide));
			} else {
				/*
				 * The file is already mapped at the correct
				 * address.
				 * We're done !
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	} else {
		/* File is not loaded, let's attempt to load it */
		ret = lsf_map(mappings, map_cnt,
			      (void *)file_control,
			      file_size,
			      sm_info,
			      base_offset,
			      slide_p);
		if (ret == KERN_NO_SPACE) {
			shared_region_mapping_t regions;
			shared_region_mapping_t system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}

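/*
 * Slide example for map_shared_file(): if a library's manifest was
 * previously loaded at base_address 0x00200000 and a caller now requests
 * base_offset 0x00100000, slide comes back as 0x00100000.  Passing a
 * non-NULL slide_p accepts that displacement; passing NULL demands the
 * exact address and gets KERN_FAILURE otherwise.
 */
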
/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	kern_return_t		kr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int		sort_index, sorted_index;
	vm_map_offset_t		sort_min_address;
	unsigned int		sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	kr = KERN_SUCCESS;
	return kr;
}

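/*
 * Example for shared_region_cleanup(): with the usual 0x90000000 client
 * base and 256 MB text and data submaps, keeping the single range
 * {0x90200000, 0x100000} deallocates [0x90000000, 0x90200000),
 * [0x90300000, 0xa0000000) and the entire data submap.
 */
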
1c79356b
A
1719/* A hash lookup function for the list of loaded files in */
1720/* shared_memory_server space. */
1721
9bccf70c 1722static load_struct_t *
1c79356b
A
1723lsf_hash_lookup(
1724 queue_head_t *hash_table,
1725 void *file_object,
5d5c5d0d 1726 vm_offset_t recognizableOffset,
1c79356b 1727 int size,
91447636 1728 boolean_t regular,
1c79356b
A
1729 boolean_t alternate,
1730 shared_region_task_mappings_t sm_info)
1731{
1732 register queue_t bucket;
1733 load_struct_t *entry;
1734 shared_region_mapping_t target_region;
1735 int depth;
1736
91447636
A
1737 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1738 "reg=%d alt=%d sm_info=%p\n",
1739 hash_table, file_object, recognizableOffset, size,
1740 regular, alternate, sm_info));
1741
1c79356b
A
1742 bucket = &(hash_table[load_file_hash((int)file_object, size)]);
1743 for (entry = (load_struct_t *)queue_first(bucket);
1744 !queue_end(bucket, &entry->links);
1745 entry = (load_struct_t *)queue_next(&entry->links)) {
55e303ae 1746
55e303ae
A
1747 if ((entry->file_object == (int)file_object) &&
1748 (entry->file_offset == recognizableOffset)) {
1c79356b
A
1749 target_region = (shared_region_mapping_t)sm_info->self;
1750 depth = target_region->depth;
1751 while(target_region) {
1752 if((!(sm_info->self)) ||
1753 ((target_region == entry->regions_instance) &&
1754 (target_region->depth >= entry->depth))) {
91447636
A
1755 if(alternate &&
1756 entry->base_address >= sm_info->alternate_base) {
1757 LSF_DEBUG(("lsf_hash_lookup: "
1758 "alt=%d found entry %p "
1759 "(base=0x%x "
1760 "alt_base=0x%x)\n",
1761 alternate, entry,
1762 entry->base_address,
1763 sm_info->alternate_base));
1764 return entry;
1765 }
1766 if (regular &&
1767 entry->base_address < sm_info->alternate_base) {
1768 LSF_DEBUG(("lsf_hash_lookup: "
1769 "reg=%d found entry %p "
1770 "(base=0x%x "
1771 "alt_base=0x%x)\n",
1772 regular, entry,
1773 entry->base_address,
1774 sm_info->alternate_base));
1775 return entry;
1c79356b
A
1776 }
1777 }
1778 if(target_region->object_chain) {
1779 target_region = (shared_region_mapping_t)
1780 target_region->object_chain->object_chain_region;
1781 depth = target_region->object_chain->depth;
1782 } else {
1783 target_region = NULL;
1784 }
1785 }
1786 }
1787 }
1788
91447636
A
1789 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1790 "reg=%d alt=%d sm_info=%p NOT FOUND\n",
1791 hash_table, file_object, recognizableOffset, size,
1792 regular, alternate, sm_info));
1c79356b
A
1793 return (load_struct_t *)0;
1794}
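
/*
 * Minimal usage sketch for lsf_hash_lookup() (illustrative only, not part
 * of this file's control flow; "file_object" and "file_offset" are
 * placeholder names).  A caller would normally hold the shared_file_header
 * lock across the probe:
 *
 *	shared_file_info_t	*sfh;
 *	load_struct_t		*ls;
 *
 *	sfh = (shared_file_info_t *) sm_info->region_mappings;
 *	mutex_lock(&sfh->lock);
 *	ls = lsf_hash_lookup(sfh->hash, file_object, file_offset,
 *			     sfh->hash_size,
 *			     TRUE,	(look in the regular area)
 *			     FALSE,	(not in the alternate area)
 *			     sm_info);
 *	mutex_unlock(&sfh->lock);
 */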

__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int				need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for(i = 0; i<shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mappings_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mappings_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance, region));
			}

			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_remove_regions_mappings_lock done\n"));

	return NULL; /* XXX */
}
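
/*
 * Note on the loop above: lsf_unload() frees "entry" and unlinks it from
 * its bucket, so the inner loop saves queue_next(&entry->links) in
 * "next_entry" before calling it.  The general idiom, sketched:
 *
 *	for (e = queue_first(bucket); !queue_end(bucket, &e->links); ) {
 *		next = (load_struct_t *) queue_next(&e->links);
 *		(possibly free "e" here; it must not be touched afterwards)
 *		e = next;
 *	}
 */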

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}

/* Removes a map_list (list of loaded file extents) for a file from */
/* the loaded file hash table.  */

static load_struct_t *
lsf_hash_delete(
	load_struct_t		*target_entry, /* optional: NULL if not relevant */
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(target=%p,file=%p,base=0x%x,sm_info=%p)\n",
		   target_entry, file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
					  sm_info->self == entry->regions_instance)) {
			if ((target_entry == NULL ||
			     entry == target_entry) &&
			    (entry->file_object == (int) file_object) &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete: not found\n"));
	return (load_struct_t *)0;
}
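
/*
 * Note: lsf_hash_delete() only unlinks the entry from its hash bucket and
 * returns it; it frees nothing itself.  The caller (see lsf_deallocate()
 * below) is responsible for walking the returned entry's mapping list and
 * releasing the memory.
 */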

/* Inserts a new map_list (list of loaded file extents) into the */
/* server's loaded file hash table. */

static void
lsf_hash_insert(
	load_struct_t		*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}
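
/*
 * lsf_hash_insert() and lsf_hash_lookup() must agree on the bucket for a
 * given file object, so both index the table with load_file_hash(file,
 * hash_size).  Note that lsf_hash_lookup()'s "size" parameter is that same
 * table size, not a mapping size.
 */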


/*
 * lsf_slide:
 *
 * Look in the shared region, starting from the end, for a place to fit all the
 * mappings while respecting their relative offsets.
 */
static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings_in,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p)
{
	mach_vm_offset_t	max_mapping_offset;
	int			i;
	vm_map_entry_t		map_entry, prev_entry, next_entry;
	mach_vm_offset_t	prev_hole_start, prev_hole_end;
	mach_vm_offset_t	mapping_offset, mapping_end_offset;
	mach_vm_offset_t	base_offset;
	mach_vm_size_t		mapping_size;
	mach_vm_offset_t	wiggle_room, wiggle;
	vm_map_t		text_map, data_map, map;
	vm_named_entry_t	region_entry;
	ipc_port_t		region_handle;
	kern_return_t		kr;

	struct shared_file_mapping_np	*mappings, tmp_mapping;
	unsigned int		sort_index, sorted_index;
	vm_map_offset_t		sort_min_address;
	unsigned int		sort_min_index;

	/*
	 * Sort the mappings array, so that we can try to fit them in
	 * the right order as we progress along the VM maps.
	 *
	 * We can't modify the original array (the original order is
	 * important when doing lookups of the mappings), so copy it first.
	 */

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &mappings,
			(vm_size_t) (map_cnt * sizeof (mappings[0])));
	if (kr != KERN_SUCCESS) {
		return KERN_NO_SPACE;
	}

	bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));

	max_mapping_offset = 0;
	for (sorted_index = 0;
	     sorted_index < map_cnt;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		mapping_end_offset = ((mappings[sort_min_index].sfm_address &
				       SHARED_TEXT_REGION_MASK) +
				      mappings[sort_min_index].sfm_size);
		sort_min_address = mapping_end_offset;
		/* compute the highest mapping_offset as well... */
		if (mapping_end_offset > max_mapping_offset) {
			max_mapping_offset = mapping_end_offset;
		}
		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < map_cnt;
		     sort_index++) {

			mapping_end_offset =
				((mappings[sort_index].sfm_address &
				  SHARED_TEXT_REGION_MASK) +
				 mappings[sort_index].sfm_size);

			if (mapping_end_offset < sort_min_address) {
				/* lowest mapping_offset so far... */
				sort_min_index = sort_index;
				sort_min_address = mapping_end_offset;
			}
		}
		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_mapping = mappings[sort_min_index];
			mappings[sort_min_index] = mappings[sorted_index];
			mappings[sorted_index] = tmp_mapping;
		}

	}

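	/*
	 * Example of the selection sort above (hypothetical numbers): three
	 * mappings whose in-region end offsets are 0x3000, 0x1000 and 0x2000
	 * come out ordered by end offset as 0x1000, 0x2000, 0x3000, and
	 * max_mapping_offset ends up as 0x3000 (the highest end offset,
	 * before page rounding).
	 */
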
	max_mapping_offset = vm_map_round_page(max_mapping_offset);

	/* start from the end of the shared area */
	base_offset = sm_info->text_size;

	/* can all the mappings fit ? */
	if (max_mapping_offset > base_offset) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
			  map_cnt * sizeof (mappings[0]));
		return KERN_FAILURE;
	}

	/*
	 * Align the last mapping to the end of the submaps
	 * and start from there.
	 */
	base_offset -= max_mapping_offset;

	region_handle = (ipc_port_t) sm_info->text_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_map = region_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_map = region_entry->backing.map;

	vm_map_lock_read(text_map);
	vm_map_lock_read(data_map);

start_over:
	/*
	 * At first, we can wiggle all the way from our starting point
	 * (base_offset) towards the start of the map (0), if needed.
	 */
	wiggle_room = base_offset;

	for (i = (signed) map_cnt - 1; i >= 0; i--) {
		if (mappings[i].sfm_size == 0) {
			/* nothing to map here... */
			continue;
		}
		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			/* copy-on-write mappings are in the data submap */
			map = data_map;
		} else {
			/* other mappings are in the text submap */
			map = text_map;
		}
		/* get the offset within the appropriate submap */
		mapping_offset = (mappings[i].sfm_address &
				  SHARED_TEXT_REGION_MASK);
		mapping_size = mappings[i].sfm_size;
		mapping_end_offset = mapping_offset + mapping_size;
		mapping_offset = vm_map_trunc_page(mapping_offset);
		mapping_end_offset = vm_map_round_page(mapping_end_offset);
		mapping_size = mapping_end_offset - mapping_offset;

		for (;;) {
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * mapped.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}

			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need. Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there: wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;
		} /* while we look for a hole */
	} /* for each mapping */

	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}
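
/*
 * Sliding in a nutshell (hypothetical numbers): if a mapping should end at
 * base_offset + 0x6000 but the hole below that point already ends at
 * base_offset + 0x4000, the loop above slides the whole library down by
 * wiggle = 0x2000 so that the mapping's end lands exactly at the end of
 * the hole.  "wiggle_room" remembers the smallest slack seen at the holes
 * accepted so far; if a later mapping needs to wiggle by more than that,
 * the placements already made may no longer hold, so the search restarts
 * ("start_over") from the new, lower base_offset.
 */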

/*
 * lsf_map:
 *
 * Attempt to establish the mappings for a split library into the shared region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;
	mach_vm_size_t		total_size;

	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object,
		   sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object,
		   sm_info, entry));
	if (entry == NULL) {
		SHARED_REGION_TRACE(
			SHARED_REGION_TRACE_ERROR,
			("shared_region: %p: "
			 "lsf_map: unable to allocate entry\n",
			 current_thread()));
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;
	total_size = 0;

	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_ERROR,
					("shared_region: %p: lsf_map: "
					 "RW mapping #%d not in segment",
					 current_thread(), i));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_ERROR,
					mappings, map_cnt, base_offset);

				lsf_deallocate(entry,
					       file_object,
					       entry->base_address,
					       sm_info,
					       TRUE);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_ERROR,
					("shared_region: %p: lsf_map: "
					 "RO mapping #%d not in segment",
					 current_thread(), i));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_ERROR,
					mappings, map_cnt, base_offset);

				lsf_deallocate(entry,
					       file_object,
					       entry->base_address,
					       sm_info,
					       TRUE);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     (file_size))) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: lsf_map: "
				 "ZF mapping #%d beyond EOF",
				 current_thread(), i));
			shared_region_dump_mappings(SHARED_REGION_TRACE_ERROR,
						    mappings, map_cnt,
						    base_offset);

			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);
		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}
		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		total_size += mappings[i].sfm_size;
		if (mappings[i].sfm_size == 0) {
			/* nothing to map... */
			kr = KERN_SUCCESS;
		} else {
			kr = mach_vm_map(
				region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT);
		}
		if (kr != KERN_SUCCESS) {
			vm_offset_t old_base_address;

			old_base_address = entry->base_address;
			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			entry = NULL;

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: lsf_map: "
					 "mapping #%d failed to map, "
					 "kr=0x%x, sliding...\n",
					 current_thread(), i, kr));
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);
				shared_region_dump_conflict_info(
					SHARED_REGION_TRACE_CONFLICT,
					region_entry->backing.map,
					(old_base_address +
					 ((mappings[i].sfm_address)
					  & region_mask)),
					vm_map_round_page(mappings[i].sfm_size));

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: lsf_map: "
				 "mapping #%d failed to map, "
				 "kr=0x%x, no sliding\n",
				 current_thread(), i, kr));
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);
			shared_region_dump_conflict_info(
				SHARED_REGION_TRACE_CONFLICT,
				region_entry->backing.map,
				(old_base_address +
				 ((mappings[i].sfm_address)
				  & region_mask)),
				vm_map_round_page(mappings[i].sfm_size));
			return KERN_FAILURE;
		}

		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_deallocate(entry,
				       file_object,
				       entry->base_address,
				       sm_info,
				       TRUE);
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: "
				 "lsf_map: unable to allocate mapping\n",
				 current_thread()));
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
			& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	}

	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if ((sm_info->flags & SHARED_REGION_STANDALONE) ||
	    (total_size == 0)) {
		/*
		 * Two cases:
		 * 1. we have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping. Just deallocate it all.
		 * 2. the total size of the mappings is 0, so nothing at all
		 * was mapped. Let's not waste kernel resources to describe
		 * nothing.
		 *
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(entry, file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}
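
/*
 * Illustrative lsf_map() call (hypothetical values; the real callers live
 * elsewhere in this file).  Judging from the segment checks above,
 * sfm_address is an offset within the shared area: read-only/executable
 * mappings must fall in the text segment, and VM_PROT_COW mappings in the
 * data segment right after it.
 *
 *	struct shared_file_mapping_np	m[2];
 *	mach_vm_offset_t		slide;
 *	kern_return_t			kr;
 *
 *	m[0].sfm_address = 0x1000;		(text segment)
 *	m[0].sfm_size = 0x3000;
 *	m[0].sfm_file_offset = 0;
 *	m[0].sfm_max_prot = VM_PROT_READ | VM_PROT_EXECUTE;
 *	m[0].sfm_init_prot = m[0].sfm_max_prot;
 *
 *	m[1].sfm_address = 0x10004000;		(data segment)
 *	m[1].sfm_size = 0x1000;
 *	m[1].sfm_file_offset = 0x3000;
 *	m[1].sfm_max_prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COW;
 *	m[1].sfm_init_prot = m[1].sfm_max_prot;
 *
 *	kr = lsf_map(m, 2, file_control, file_size, sm_info,
 *		     base_offset, &slide);
 */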

/* Finds the file_object extent list in the shared memory hash table.  */
/* If one is found, the associated extents in shared memory are deallocated */
/* and the extent list is freed. */

static void
lsf_unload(
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(NULL, file_object, base_offset, sm_info, TRUE);
}

/*
 * lsf_deallocate:
 *
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocate the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	load_struct_t		*target_entry,
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t		unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;
	kern_return_t		kr;

	LSF_DEBUG(("lsf_deallocate(target=%p,file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   target_entry, file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(target_entry,
				file_object,
				base_offset,
				sm_info);
	if (entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if (unload) {
				ipc_port_t region_handle;
				vm_named_entry_t region_entry;

				if(map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				kr = vm_deallocate(region_entry->backing.map,
						   (entry->base_address +
						    map_ele->mapping_offset),
						   map_ele->size);
				assert(kr == KERN_SUCCESS);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_deallocate: done\n"));
}
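
/*
 * The "unload" argument selects between the two callers' needs:
 * lsf_unload() passes TRUE so that each extent is also vm_deallocate()d
 * from its text or data submap, while lsf_map() passes FALSE for
 * standalone regions (or empty mapping lists) to release only the
 * bookkeeping structures and leave the submap contents alone.
 */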

/* returns an integer from 0 to 100 representing how full the mapping pool is */
unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
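
/*
 * Example (hypothetical zone state): with 300 mapping elements allocated
 * at 64 bytes each and max_size = 38400 bytes, the gauge reports
 * (300 * 64 * 100) / 38400 = 50, i.e. the pool is half full.
 */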