/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */

#include <debug.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

int shared_region_trace_level = SHARED_REGION_TRACE_ERROR;

#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)			\
	MACRO_BEGIN			\
	if (lsf_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#define LSF_ALLOC_DEBUG(args)		\
	MACRO_BEGIN			\
	if (lsf_alloc_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#else /* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif /* DEBUG */

/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock);


static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	load_struct_t			*target_entry,	/* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	load_struct_t			*target_entry,	/* optional */
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload);


#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)

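/*
 * Illustrative note (not in the original source): load_file_hash()
 * buckets a file by the low 24 bits of its VM object pointer, modulo
 * the table size.  A lookup therefore starts from
 *
 *	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
 *
 * which is exactly how lsf_hash_lookup() below picks its bucket before
 * walking the chained load_struct_t entries.
 */
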
/* Implementation */
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)


ipc_port_t	sfma_handle = NULL;
zone_t		lsf_zone;

int		shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle32 = NULL;
ipc_port_t		com_region_handle64 = NULL;
vm_map_t		com_region_map32 = NULL;
vm_map_t		com_region_map64 = NULL;
vm_size_t		com_region_size32 = _COMM_PAGE32_AREA_LENGTH;
vm_size_t		com_region_size64 = _COMM_PAGE64_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;


#if DEBUG
int shared_region_debug = 0;
#endif /* DEBUG */


kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}
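
/*
 * Usage sketch (hypothetical caller, not from this file): the get/set
 * pair does no reference counting of its own, so callers swap regions
 * by taking and dropping references explicitly:
 *
 *	shared_region_mapping_t old_region, new_region;
 *	vm_get_shared_region(task, &old_region);
 *	shared_region_mapping_ref(new_region);
 *	vm_set_shared_region(task, new_region);
 *	shared_region_mapping_dealloc(old_region);
 *
 * map_shared_file()'s KERN_NO_SPACE recovery path below follows a
 * similar get/dealloc/set sequence.
 */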

kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	shared_region_mapping_t old_region;

	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p[%x,%x,%x])\n",
			     task, shared_region,
			     shared_region ? shared_region->fs_base : 0,
			     shared_region ? shared_region->system : 0,
			     shared_region ? shared_region->flags : 0));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}

	old_region = task->system_shared_region;
	SHARED_REGION_TRACE(
		SHARED_REGION_TRACE_INFO,
		("shared_region: %p set_region(task=%p)"
		 "old=%p[%x,%x,%x], new=%p[%x,%x,%x]\n",
		 current_thread(), task,
		 old_region,
		 old_region ? old_region->fs_base : 0,
		 old_region ? old_region->system : 0,
		 old_region ? old_region->flags : 0,
		 shared_region,
		 shared_region ? shared_region->fs_base : 0,
		 shared_region ? shared_region->system : 0,
		 shared_region ? shared_region->flags : 0));

	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}

/*
 * shared_region_object_chain_detached:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}

/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just lookup the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t		target_region,
	shared_region_mapping_t		object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p)\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if(target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
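
/*
 * Illustrative example (not part of the original source): after
 * shared_region_object_chain_attach(B, A), region B shadows region A:
 *
 *	B->object_chain->object_chain_region == A
 *	B->object_chain->depth == depth of A at attach time
 *
 * so lsf_hash_lookup() can fall through from B's mappings to A's when a
 * file is not found in B itself.
 */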

/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next,
	int			fs_base,
	int			system)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if(*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failure\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = fs_base;
	(*shared_region)->system = system;
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}

/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}

kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if(shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
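
/*
 * Note (descriptive, not from the original source): ref_count is
 * adjusted with hw_atomic_add()/hw_atomic_sub() rather than under the
 * region's mapping lock, so references can be taken and dropped without
 * the lock; shared_region_mapping_dealloc_lock() below only takes the
 * lock once the count has actually dropped to zero.
 */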

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next = NULL;
	unsigned int				ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if(shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if(((vm_named_entry_t)
			    (shared_region->text_region->ip_kobject))
			   ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if(shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if((ref_count == 1) &&
			   (shared_region->flags & SHARED_REGION_SYSTEM)
			   && !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}

/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}

static
kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if(user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();


	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
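
/*
 * Note (descriptive, not from the original source): the named entry
 * created above is backed by a pmap-equipped submap rather than a VM
 * object, so a shared region "handle" is really a send right to a port
 * whose kobject wraps a vm_map_t; callers such as
 * shared_com_boot_time_init() reach that map through
 * named_entry->backing.map.
 */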

/* called for the non-default, private branch shared region support; */
/* the system default fields fs_base and system are not relevant     */
/* here, as the system default flag is not set                       */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region,
	int			fs_base,
	int			system)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size,
				&mapping_array);
	if(kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle, text_size,
					    data_handle, data_size,
					    mapping_array,
					    GLOBAL_SHARED_TEXT_SEGMENT,
					    shared_region,
					    SHARED_ALTERNATE_LOAD_BASE,
					    SHARED_ALTERNATE_LOAD_BASE,
					    fs_base,
					    system);
	if(kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if(com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}

/*
 * load a new default for a specified environment into the default shared
 * regions list.  If a previous default exists for the environment
 * specification it is returned along with its reference.  It is expected
 * that the new system region structure passes in a reference.
 */

shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if((old_system_region != NULL) &&
	   (old_system_region->fs_base == fs_base) &&
	   (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while(old_system_region->default_env_list != NULL) {
			if((old_system_region->default_env_list->fs_base == fs_base) &&
			   (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry */
	if(old_system_region) {
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}
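
/*
 * Usage sketch (hypothetical, mirroring shared_file_boot_time_init()
 * below): the new region donates a reference to the default list, and
 * any displaced previous default comes back with its reference, which
 * the caller must drop:
 *
 *	shared_region_mapping_ref(new_system_region);
 *	old = update_default_shared_region(new_system_region);
 *	if (old)
 *		shared_region_mapping_dealloc(old);
 */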

/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure.
 */

shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while(system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if((system_region->fs_base == fs_base) &&
		   (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if(system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}

/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */

__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if(old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock,
						   0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while(old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if(old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock,
							   0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}

void
remove_all_shared_regions(void)
{
	shared_region_mapping_t	system_region;
	shared_region_mapping_t	next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if(system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while(system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}

/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi-independent of the split libs and   */
/* so its policies have to be handled differently by the code that       */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both.                      */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if(com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if(com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code */
	if((kret = shared_region_object_create(
		    com_region_size32,
		    &com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
		return;
	}
	if((kret = shared_region_object_create(
		    com_region_size64,
		    &com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
		return;
	}

	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
					    com_region_size32,
					    NULL, 0, 0,
					    _COMM_PAGE_BASE_ADDRESS,
					    &com_mapping_resource,
					    0, 0,
					    ENV_DEFAULT_ROOT, cpu_type());
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}
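
/*
 * Note (descriptive, not from the original source): both comm page
 * regions are created above, but -- per the "64-bit todo" comment --
 * only com_region_handle32 is wrapped in the com_mapping_resource
 * structure that gets chained onto the shared regions; the 64-bit
 * region is only exported via com_region_map64.
 */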

void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	mach_port_t		text_region_handle;
	mach_port_t		data_region_handle;
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&text_region_handle,
			 text_region_size,
			 &data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(text_region_handle,
				     text_region_size,
				     data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE,
				     fs_base, system);

	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions. */
	if(old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if(com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}


/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.                          */


static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_size_t		data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
		text_region_size,
		text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
		data_region_size,
		data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
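	/*
	 * Illustrative arithmetic (not in the original source): with the
	 * 0x10000000 (256 MB) region sizes used by the callers in this
	 * file, data_table_size is 256 MB >> 9 = 512 KB and hash_size is
	 * 256 MB >> 14 = 16 KB; hash_size is divided by
	 * sizeof(queue_head_t) below to get the bucket count.
	 */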

	if(shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &map_addr,
				     data_table_size, 0, 0, &entry)
		   != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   TRUE);
		}


		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;


		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ,
				       &sfma_handle, NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);

	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return KERN_SUCCESS;
}

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header)
{
	vm_size_t	hash_table_size;
	vm_size_t	hash_table_offset;
	int		i;
	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;


	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: shared_file_header_init: "
				 "No memory for data table\n"));
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}

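/*
 * Note (an interpretation, not from the original source): the
 * "hash_table_size << 5" in shared_file_header_init() appears to
 * reconstruct the full data table size -- shared_file_init() sized the
 * table at data_region_size >> 9 and the hash area at
 * data_region_size >> 14, a factor of 32 apart -- so
 * allocable_hash_pages is what remains of the table after the hash
 * buckets and the sf_mapping area.
 */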

extern void shared_region_dump_file_entry(
	int trace_level,
	load_struct_t *entry);	/* forward */

void shared_region_dump_file_entry(
	int trace_level,
	load_struct_t *entry)
{
	int i;
	loaded_mapping_t *mapping;

	if (trace_level > shared_region_trace_level) {
		return;
	}
	printf("shared region: %p: "
	       "file_entry %p base_address=0x%x file_offset=0x%x "
	       "%d mappings\n",
	       current_thread(), entry,
	       entry->base_address, entry->file_offset, entry->mapping_cnt);
	mapping = entry->mappings;
	for (i = 0; i < entry->mapping_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "offset=0x%x size=0x%x file_offset=0x%x prot=%d\n",
		       current_thread(),
		       i,
		       mapping->mapping_offset,
		       mapping->size,
		       mapping->file_offset,
		       mapping->protection);
		mapping = mapping->next;
	}
}

extern void shared_region_dump_mappings(
	int trace_level,
	struct shared_file_mapping_np *mappings,
	int map_cnt,
	mach_vm_offset_t base_offset);	/* forward */

void shared_region_dump_mappings(
	int trace_level,
	struct shared_file_mapping_np *mappings,
	int map_cnt,
	mach_vm_offset_t base_offset)
{
	int i;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	printf("shared region: %p: %d mappings base_offset=0x%llx\n",
	       current_thread(), map_cnt, (uint64_t) base_offset);
	for (i = 0; i < map_cnt; i++) {
		printf("shared region: %p:\t#%d: "
		       "addr=0x%llx, size=0x%llx, file_offset=0x%llx, "
		       "prot=(%d,%d)\n",
		       current_thread(),
		       i,
		       (uint64_t) mappings[i].sfm_address,
		       (uint64_t) mappings[i].sfm_size,
		       (uint64_t) mappings[i].sfm_file_offset,
		       mappings[i].sfm_max_prot,
		       mappings[i].sfm_init_prot);
	}
}

extern void shared_region_dump_conflict_info(
	int trace_level,
	vm_map_t map,
	vm_map_offset_t offset,
	vm_map_size_t size);	/* forward */

void
shared_region_dump_conflict_info(
	int trace_level,
	vm_map_t map,
	vm_map_offset_t offset,
	vm_map_size_t size)
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	memory_object_t	mem_object;
	kern_return_t	kr;
	char		*filename;

	if (trace_level > shared_region_trace_level) {
		return;
	}

	object = VM_OBJECT_NULL;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, offset, &entry)) {
		entry = entry->vme_next;
	}

	if (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			printf("shared region: %p: conflict with submap "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		object = entry->object.vm_object;
		if (object == VM_OBJECT_NULL) {
			printf("shared region: %p: conflict with NULL object "
			       "at 0x%llx size 0x%llx !?\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			object = VM_OBJECT_NULL;
			goto done;
		}

		vm_object_lock(object);
		while (object->shadow != VM_OBJECT_NULL) {
			vm_object_t shadow;

			shadow = object->shadow;
			vm_object_lock(shadow);
			vm_object_unlock(object);
			object = shadow;
		}

		if (object->internal) {
			printf("shared region: %p: conflict with anonymous "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}
		if (! object->pager_ready) {
			printf("shared region: %p: conflict with uninitialized "
			       "at 0x%llx size 0x%llx\n",
			       current_thread(),
			       (uint64_t) offset,
			       (uint64_t) size);
			goto done;
		}

		mem_object = object->pager;

		/*
		 * XXX FBDP: "!internal" doesn't mean it's a vnode pager...
		 */
		kr = vnode_pager_get_object_filename(mem_object,
						     &filename);
		if (kr != KERN_SUCCESS) {
			filename = NULL;
		}
		printf("shared region: %p: conflict with '%s' "
		       "at 0x%llx size 0x%llx\n",
		       current_thread(),
		       filename ? filename : "<unknown>",
		       (uint64_t) offset,
		       (uint64_t) size);
	}
done:
	if (object != VM_OBJECT_NULL) {
		vm_object_unlock(object);
	}
	vm_map_unlock_read(map);
}

/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the
 * mappings are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_ERROR,
				("shared_region: %p: map_shared_file: "
				 "shared_file_header_init() failed kr=0x%x\n",
				 current_thread(), ret));
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same   */
		/* size and in the same order rather than checking for      */
		/* semantic equivalence. */

		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i>=map_cnt) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: map_shared_file: "
					 "already mapped with "
					 "more than %d mappings\n",
					 current_thread(), map_cnt));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].sfm_address)
			    & SHARED_DATA_REGION_MASK) !=
			   file_mapping->mapping_offset ||
			   mappings[i].sfm_size != file_mapping->size ||
			   mappings[i].sfm_file_offset != file_mapping->file_offset ||
			   mappings[i].sfm_init_prot != file_mapping->protection) {
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "mapping #%d differs\n",
					 current_thread(), i));
				shared_region_dump_file_entry(
					SHARED_REGION_TRACE_INFO,
					file_entry);
				shared_region_dump_mappings(
					SHARED_REGION_TRACE_INFO,
					mappings, map_cnt, base_offset);

				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i!=map_cnt) {
			SHARED_REGION_TRACE(
				SHARED_REGION_TRACE_CONFLICT,
				("shared_region: %p: map_shared_file: "
				 "already mapped with "
				 "%d mappings instead of %d\n",
				 current_thread(), i, map_cnt));
			shared_region_dump_file_entry(
				SHARED_REGION_TRACE_INFO,
				file_entry);
			shared_region_dump_mappings(
				SHARED_REGION_TRACE_INFO,
				mappings, map_cnt, base_offset);

			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * We fail.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
				SHARED_REGION_TRACE(
					SHARED_REGION_TRACE_CONFLICT,
					("shared_region: %p: "
					 "map_shared_file: already mapped, "
					 "would need to slide 0x%llx\n",
					 current_thread(),
					 slide));
			} else {
				/*
				 * The file is already mapped at the correct
				 * address.
				 * We're done !
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_map(mappings, map_cnt,
			      (void *)file_control,
			      file_size,
			      sm_info,
			      base_offset,
			      slide_p);
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
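
/*
 * Illustrative example (hypothetical values, not from this file): if a
 * split library was previously loaded at base_address 0x90000000 and a
 * caller asks for base_offset 0x80000000 with a non-NULL slide_p, the
 * lookup above succeeds with *slide_p = 0x10000000; the same request
 * with slide_p == NULL returns KERN_FAILURE rather than sliding.
 */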

/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	kern_return_t		kr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int		sort_index, sorted_index;
	vm_map_offset_t		sort_min_address;
	unsigned int		sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}
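
	/*
	 * (Descriptive note, not in the original: the loop above is a
	 * plain in-place selection sort, which is adequate for the
	 * presumably small "ranges" arrays this routine is handed.)
	 */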

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	kr = KERN_SUCCESS;
	return kr;
}
1716
1c79356b
A
1717/* A hash lookup function for the list of loaded files in */
1718/* shared_memory_server space. */
1719
9bccf70c 1720static load_struct_t *
1c79356b
A
1721lsf_hash_lookup(
1722 queue_head_t *hash_table,
1723 void *file_object,
1724 vm_offset_t recognizableOffset,
1725 int size,
1726 boolean_t regular,
1727 boolean_t alternate,
1728 shared_region_task_mappings_t sm_info)
1729{
1730 register queue_t bucket;
1731 load_struct_t *entry;
1732 shared_region_mapping_t target_region;
1733 int depth;
1734
1735 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1736 "reg=%d alt=%d sm_info=%p\n",
1737 hash_table, file_object, recognizableOffset, size,
1738 regular, alternate, sm_info));
1739
1740 bucket = &(hash_table[load_file_hash((int)file_object, size)]);
1741 for (entry = (load_struct_t *)queue_first(bucket);
1742 !queue_end(bucket, &entry->links);
1743 entry = (load_struct_t *)queue_next(&entry->links)) {
1744
1745 if ((entry->file_object == (int)file_object) &&
1746 (entry->file_offset == recognizableOffset)) {
1747 target_region = (shared_region_mapping_t)sm_info->self;
1748 depth = target_region->depth;
1749 while(target_region) {
1750 if((!(sm_info->self)) ||
1751 ((target_region == entry->regions_instance) &&
1752 (target_region->depth >= entry->depth))) {
1753 if(alternate &&
1754 entry->base_address >= sm_info->alternate_base) {
1755 LSF_DEBUG(("lsf_hash_lookup: "
1756 "alt=%d found entry %p "
1757 "(base=0x%x "
1758 "alt_base=0x%x)\n",
1759 alternate, entry,
1760 entry->base_address,
1761 sm_info->alternate_base));
1762 return entry;
1763 }
1764 if (regular &&
1765 entry->base_address < sm_info->alternate_base) {
1766 LSF_DEBUG(("lsf_hash_lookup: "
1767 "reg=%d found entry %p "
1768 "(base=0x%x "
1769 "alt_base=0x%x)\n",
1770 regular, entry,
1771 entry->base_address,
1772 sm_info->alternate_base));
1773 return entry;
1774 }
1775 }
1776 if(target_region->object_chain) {
1777 target_region = (shared_region_mapping_t)
1778 target_region->object_chain->object_chain_region;
1779 depth = target_region->object_chain->depth;
1780 } else {
1781 target_region = NULL;
1782 }
1783 }
1784 }
1785 }
1786
1787 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1788 "reg=%d alt=%d sm_info=%p NOT FOUND\n",
1789 hash_table, file_object, recognizableOffset, size,
1790 regular, alternate, sm_info));
1791 return (load_struct_t *)0;
1792}
1793
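/*
 * Editor's sketch (not part of the original source): "load_file_hash",
 * used by lsf_hash_lookup() above to pick a bucket, is defined earlier in
 * this file. A minimal stand-in, assuming it simply folds the file_object
 * pointer bits and reduces them modulo the table size (the real macro's
 * masking may differ):
 */
static int
load_file_hash_sketch(int file_object, int hash_size)
{
	/* fold the pointer bits, then reduce to a bucket index */
	return (file_object & 0xffffff) % hash_size;
}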
1794__private_extern__ load_struct_t *
1795lsf_remove_regions_mappings_lock(
1796 shared_region_mapping_t region,
1797 shared_region_task_mappings_t sm_info,
1798 int need_sfh_lock)
1799{
1800 int i;
1801 register queue_t bucket;
1802 shared_file_info_t *shared_file_header;
1803 load_struct_t *entry;
1804 load_struct_t *next_entry;
1805
1806 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1807
1808 LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
1809 "sfh=%p\n",
1810 region, sm_info, shared_file_header));
1811 if (need_sfh_lock)
1812 mutex_lock(&shared_file_header->lock);
1813 if(shared_file_header->hash_init == FALSE) {
1814 if (need_sfh_lock)
1815 mutex_unlock(&shared_file_header->lock);
1816 LSF_DEBUG(("lsf_remove_regions_mappings_lock"
1817 "(region=%p,sm_info=%p): not inited\n",
1818 region, sm_info));
1819 return NULL;
1820 }
1821 for(i = 0; i<shared_file_header->hash_size; i++) {
1822 bucket = &shared_file_header->hash[i];
1823 for (entry = (load_struct_t *)queue_first(bucket);
1824 !queue_end(bucket, &entry->links);) {
1825 next_entry = (load_struct_t *)queue_next(&entry->links);
1826 if(region == entry->regions_instance) {
1827 LSF_DEBUG(("lsf_remove_regions_mappings_lock: "
1828 "entry %p region %p: "
1829 "unloading\n",
1830 entry, region));
1831 lsf_unload((void *)entry->file_object,
1832 entry->base_address, sm_info);
1833 } else {
1834 LSF_DEBUG(("lsf_remove_regions_mappings_lock: "
1835 "entry %p region %p target region %p: "
1836 "not unloading\n",
1837 entry, entry->regions_instance, region));
1838 }
1839
1840 entry = next_entry;
1841 }
1842 }
1843 if (need_sfh_lock)
1844 mutex_unlock(&shared_file_header->lock);
1845 LSF_DEBUG(("lsf_remove_regions_mappings_lock: done\n"));
1846
1847 return NULL; /* XXX */
1848}
1849
1850/*
1851 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
1852 * only caller. Remove this stub function and the corresponding symbol
1853 * export for Merlot.
1854 */
1855load_struct_t *
1856lsf_remove_regions_mappings(
1857 shared_region_mapping_t region,
1858 shared_region_task_mappings_t sm_info)
1859{
1860 return lsf_remove_regions_mappings_lock(region, sm_info, 1);
1861}
1862
1863/* Removes a map_list, (list of loaded extents) for a file from */
1864/* the loaded file hash table. */
1865
1866static load_struct_t *
1867lsf_hash_delete(
1868 load_struct_t *target_entry, /* optional: NULL if not relevant */
1869 void *file_object,
1870 vm_offset_t base_offset,
1871 shared_region_task_mappings_t sm_info)
1872{
1873 register queue_t bucket;
1874 shared_file_info_t *shared_file_header;
1875 load_struct_t *entry;
1876
1877 LSF_DEBUG(("lsf_hash_delete(target=%p,file=%p,base=0x%x,sm_info=%p)\n",
1878 target_entry, file_object, base_offset, sm_info));
1879
1880 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1881
1882 bucket = &shared_file_header->hash
1883 [load_file_hash((int)file_object, shared_file_header->hash_size)];
1884
1885 for (entry = (load_struct_t *)queue_first(bucket);
1886 !queue_end(bucket, &entry->links);
1887 entry = (load_struct_t *)queue_next(&entry->links)) {
1888 if((!(sm_info->self)) || ((shared_region_mapping_t)
1889 sm_info->self == entry->regions_instance)) {
1890 if ((target_entry == NULL ||
1891 entry == target_entry) &&
1892 (entry->file_object == (int) file_object) &&
1893 (entry->base_address == base_offset)) {
1894 queue_remove(bucket, entry,
1895 load_struct_ptr_t, links);
1896 LSF_DEBUG(("lsf_hash_delete: found it\n"));
1897 return entry;
1898 }
1899 }
1900 }
1901
1902 LSF_DEBUG(("lsf_hash_delete: not found\n"));
1903 return (load_struct_t *)0;
1904}
1905
1906/* Inserts a new map_list, (list of loaded file extents), into the */
1907/* server loaded file hash table. */
1908
1909static void
1910lsf_hash_insert(
1911 load_struct_t *entry,
1912 shared_region_task_mappings_t sm_info)
1913{
1914 shared_file_info_t *shared_file_header;
1915
1916 LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
1917 entry, sm_info, entry->file_object, entry->base_address));
1918
1919 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1920 queue_enter(&shared_file_header->hash
1921 [load_file_hash(entry->file_object,
1922 shared_file_header->hash_size)],
1923 entry, load_struct_ptr_t, links);
1924}
1925
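/*
 * Editor's usage sketch (hypothetical caller, not original source): insert
 * and lookup form the usual open-hash round trip. Once a file's
 * load_struct_t has been inserted under its file_object key, a later
 * lookup that probes the same region instance finds it again:
 *
 *	lsf_hash_insert(entry, sm_info);
 *	...
 *	found = lsf_hash_lookup(shared_file_header->hash,
 *				(void *) entry->file_object,
 *				entry->file_offset,
 *				shared_file_header->hash_size,
 *				TRUE, TRUE, sm_info);
 *	assert(found == entry);
 */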
1926
1927
1928/*
1929 * lsf_slide:
1930 *
1931 * Look in the shared region, starting from the end, for a place to fit all the
1932 * mappings while respecting their relative offsets.
1933 */
1934static kern_return_t
1935lsf_slide(
1936 unsigned int map_cnt,
1937 struct shared_file_mapping_np *mappings_in,
1938 shared_region_task_mappings_t sm_info,
1939 mach_vm_offset_t *base_offset_p)
1940{
1941 mach_vm_offset_t max_mapping_offset;
1942 int i;
1943 vm_map_entry_t map_entry, prev_entry, next_entry;
1944 mach_vm_offset_t prev_hole_start, prev_hole_end;
1945 mach_vm_offset_t mapping_offset, mapping_end_offset;
1946 mach_vm_offset_t base_offset;
1947 mach_vm_size_t mapping_size;
1948 mach_vm_offset_t wiggle_room, wiggle;
1949 vm_map_t text_map, data_map, map;
1950 vm_named_entry_t region_entry;
1951 ipc_port_t region_handle;
1952 kern_return_t kr;
1953
1954 struct shared_file_mapping_np *mappings, tmp_mapping;
1955 unsigned int sort_index, sorted_index;
1956 vm_map_offset_t sort_min_address;
1957 unsigned int sort_min_index;
1958
1959 /*
1960 * Sort the mappings array, so that we can try and fit them
1961 * in the right order as we progress along the VM maps.
1962 *
1963 * We can't modify the original array (the original order is
1964 * important when doing lookups of the mappings), so copy it first.
1965 */
1966
1967 kr = kmem_alloc(kernel_map,
1968 (vm_offset_t *) &mappings,
1969 (vm_size_t) (map_cnt * sizeof (mappings[0])));
1970 if (kr != KERN_SUCCESS) {
1971 return KERN_NO_SPACE;
1972 }
1973
1974 bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));
1975
1976 max_mapping_offset = 0;
1977 for (sorted_index = 0;
1978 sorted_index < map_cnt;
1979 sorted_index++) {
1980
1981 /* first remaining entry is our new starting point */
1982 sort_min_index = sorted_index;
1983 mapping_end_offset = ((mappings[sort_min_index].sfm_address &
1984 SHARED_TEXT_REGION_MASK) +
1985 mappings[sort_min_index].sfm_size);
1986 sort_min_address = mapping_end_offset;
1987 /* compute the highest mapping_offset as well... */
1988 if (mapping_end_offset > max_mapping_offset) {
1989 max_mapping_offset = mapping_end_offset;
1990 }
1991 /* find the lowest mapping_offset in the remaining entries */
1992 for (sort_index = sorted_index + 1;
1993 sort_index < map_cnt;
1994 sort_index++) {
1995
1996 mapping_end_offset =
1997 ((mappings[sort_index].sfm_address &
1998 SHARED_TEXT_REGION_MASK) +
1999 mappings[sort_index].sfm_size);
2000
2001 if (mapping_end_offset < sort_min_address) {
2002 /* lowest mapping_offset so far... */
2003 sort_min_index = sort_index;
2004 sort_min_address = mapping_end_offset;
2005 }
2006 }
2007 if (sort_min_index != sorted_index) {
2008 /* swap entries */
2009 tmp_mapping = mappings[sort_min_index];
2010 mappings[sort_min_index] = mappings[sorted_index];
2011 mappings[sorted_index] = tmp_mapping;
2012 }
2013
2014 }
2015
2016 max_mapping_offset = vm_map_round_page(max_mapping_offset);
2017
2018 /* start from the end of the shared area */
2019 base_offset = sm_info->text_size;
2020
2021 /* can all the mappings fit ? */
2022 if (max_mapping_offset > base_offset) {
2023 kmem_free(kernel_map,
2024 (vm_offset_t) mappings,
2025 map_cnt * sizeof (mappings[0]));
2026 return KERN_FAILURE;
2027 }
2028
2029 /*
2030 * Align the last mapping to the end of the submaps
2031 * and start from there.
2032 */
2033 base_offset -= max_mapping_offset;
2034
2035 region_handle = (ipc_port_t) sm_info->text_region;
2036 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2037 text_map = region_entry->backing.map;
2038
2039 region_handle = (ipc_port_t) sm_info->data_region;
2040 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2041 data_map = region_entry->backing.map;
2042
2043 vm_map_lock_read(text_map);
2044 vm_map_lock_read(data_map);
2045
2046start_over:
2047 /*
2048 * At first, we can wiggle all the way from our starting point
2049 * (base_offset) towards the start of the map (0), if needed.
2050 */
2051 wiggle_room = base_offset;
2052
2053 for (i = (signed) map_cnt - 1; i >= 0; i--) {
2054 if (mappings[i].sfm_size == 0) {
2055 /* nothing to map here... */
2056 continue;
2057 }
2058 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
2059 /* copy-on-write mappings are in the data submap */
2060 map = data_map;
2061 } else {
2062 /* other mappings are in the text submap */
2063 map = text_map;
2064 }
2065 /* get the offset within the appropriate submap */
2066 mapping_offset = (mappings[i].sfm_address &
2067 SHARED_TEXT_REGION_MASK);
2068 mapping_size = mappings[i].sfm_size;
2069 mapping_end_offset = mapping_offset + mapping_size;
2070 mapping_offset = vm_map_trunc_page(mapping_offset);
2071 mapping_end_offset = vm_map_round_page(mapping_end_offset);
2072 mapping_size = mapping_end_offset - mapping_offset;
2073
2074 for (;;) {
2075 if (vm_map_lookup_entry(map,
2076 base_offset + mapping_offset,
2077 &map_entry)) {
2078 /*
2079 * The start address for that mapping
2080 * is already mapped: no fit.
2081 * Locate the hole immediately before this map
2082 * entry.
2083 */
2084 prev_hole_end = map_entry->vme_start;
2085 prev_entry = map_entry->vme_prev;
2086 if (prev_entry == vm_map_to_entry(map)) {
2087 /* no previous entry */
2088 prev_hole_start = map->min_offset;
2089 } else {
2090 /* previous entry ends here */
2091 prev_hole_start = prev_entry->vme_end;
2092 }
2093 } else {
2094 /*
2095 * The start address for that mapping is not
2096 * mapped.
2097 * Locate the start and end of the hole
2098 * at that location.
2099 */
2100 /* map_entry is the previous entry */
2101 if (map_entry == vm_map_to_entry(map)) {
2102 /* no previous entry */
2103 prev_hole_start = map->min_offset;
2104 } else {
2105 /* previous entry ends there */
2106 prev_hole_start = map_entry->vme_end;
2107 }
2108 next_entry = map_entry->vme_next;
2109 if (next_entry == vm_map_to_entry(map)) {
2110 /* no next entry */
2111 prev_hole_end = map->max_offset;
2112 } else {
2113 prev_hole_end = next_entry->vme_start;
2114 }
2115 }
2116
2117 if (prev_hole_end <= base_offset + mapping_offset) {
2118 /* hole is to our left: try and wiggle to fit */
2119 wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
2120 if (wiggle > base_offset) {
2121 /* we're getting out of the map */
2122 kr = KERN_FAILURE;
2123 goto done;
2124 }
2125 base_offset -= wiggle;
2126 if (wiggle > wiggle_room) {
2127 /* can't wiggle that much: start over */
2128 goto start_over;
2129 }
2130 /* account for the wiggling done */
2131 wiggle_room -= wiggle;
2132 }
2133
2134 if (prev_hole_end >
2135 base_offset + mapping_offset + mapping_size) {
2136 /*
2137 * The hole extends further to the right
2138 * than what we need. Ignore the extra space.
2139 */
2140 prev_hole_end = (base_offset + mapping_offset +
2141 mapping_size);
2142 }
2143
2144 if (prev_hole_end <
2145 base_offset + mapping_offset + mapping_size) {
2146 /*
2147 * The hole is not big enough to establish
2148 * the mapping right there: wiggle towards
2149 * the beginning of the hole so that the end
2150 * of our mapping fits in the hole...
2151 */
2152 wiggle = base_offset + mapping_offset
2153 + mapping_size - prev_hole_end;
2154 if (wiggle > base_offset) {
2155 /* we're getting out of the map */
2156 kr = KERN_FAILURE;
2157 goto done;
2158 }
2159 base_offset -= wiggle;
2160 if (wiggle > wiggle_room) {
2161 /* can't wiggle that much: start over */
2162 goto start_over;
2163 }
2164 /* account for the wiggling done */
2165 wiggle_room -= wiggle;
2166
2167 /* keep searching from this new base */
2168 continue;
2169 }
2170
2171 if (prev_hole_start > base_offset + mapping_offset) {
2172 /* no hole found: keep looking */
2173 continue;
2174 }
2175
2176 /* compute wiggling room at this hole */
2177 wiggle = base_offset + mapping_offset - prev_hole_start;
2178 if (wiggle < wiggle_room) {
2179 /* less wiggle room than before... */
2180 wiggle_room = wiggle;
2181 }
2182
2183 /* found a hole that fits: skip to next mapping */
2184 break;
2185 } /* while we look for a hole */
2186 } /* for each mapping */
2187
2188 *base_offset_p = base_offset;
2189 kr = KERN_SUCCESS;
2190
2191done:
2192 vm_map_unlock_read(text_map);
2193 vm_map_unlock_read(data_map);
2194
2195 kmem_free(kernel_map,
2196 (vm_offset_t) mappings,
2197 map_cnt * sizeof (mappings[0]));
2198
2199 return kr;
2200}
2201
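/*
 * Editor's sketch (not part of the original source): the sorting pass at
 * the top of lsf_slide() is a plain selection sort keyed on each mapping's
 * end offset within the region. Isolated here as a hypothetical helper
 * over the same shared_file_mapping_np array:
 */
static void
sort_mappings_by_end_offset(
	struct shared_file_mapping_np *m,
	unsigned int cnt)
{
	unsigned int i, j, min_i;
	struct shared_file_mapping_np tmp;
	mach_vm_offset_t key_j, key_min;

	for (i = 0; i < cnt; i++) {
		min_i = i;
		key_min = (m[min_i].sfm_address & SHARED_TEXT_REGION_MASK) +
			m[min_i].sfm_size;
		for (j = i + 1; j < cnt; j++) {
			key_j = (m[j].sfm_address & SHARED_TEXT_REGION_MASK) +
				m[j].sfm_size;
			if (key_j < key_min) {
				min_i = j;
				key_min = key_j;
			}
		}
		if (min_i != i) {
			/* swap the lowest remaining mapping into place */
			tmp = m[min_i];
			m[min_i] = m[i];
			m[i] = tmp;
		}
	}
}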
2202/*
2203 * lsf_map:
2204 *
2205 * Attempt to establish the mappings of a split library in the shared region.
2206 */
2207static kern_return_t
2208lsf_map(
2209 struct shared_file_mapping_np *mappings,
2210 int map_cnt,
2211 void *file_control,
2212 memory_object_offset_t file_size,
2213 shared_region_task_mappings_t sm_info,
2214 mach_vm_offset_t base_offset,
2215 mach_vm_offset_t *slide_p)
2216{
2217 load_struct_t *entry;
2218 loaded_mapping_t *file_mapping;
2219 loaded_mapping_t **tptr;
2220 ipc_port_t region_handle;
2221 vm_named_entry_t region_entry;
2222 mach_port_t map_port;
2223 vm_object_t file_object;
2224 kern_return_t kr;
2225 int i;
2226 mach_vm_offset_t original_base_offset;
2227 mach_vm_size_t total_size;
2228
2229 /* get the VM object from the file's memory object handle */
2230 file_object = memory_object_control_to_vm_object(file_control);
2231
2232 original_base_offset = base_offset;
2233
2234 LSF_DEBUG(("lsf_map"
2235 "(cnt=%d,file=%p,sm_info=%p)"
2236 "\n",
2237 map_cnt, file_object,
2238 sm_info));
2239
2240restart_after_slide:
2241 /* get a new "load_struct_t" to describe the mappings for that file */
2242 entry = (load_struct_t *)zalloc(lsf_zone);
2243 LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
2244 LSF_DEBUG(("lsf_map"
2245 "(cnt=%d,file=%p,sm_info=%p) "
2246 "entry=%p\n",
2247 map_cnt, file_object,
2248 sm_info, entry));
2249 if (entry == NULL) {
2250 SHARED_REGION_TRACE(
2251 SHARED_REGION_TRACE_ERROR,
2252 ("shared_region: %p: "
2253 "lsf_map: unable to allocate entry\n",
2254 current_thread()));
2255 return KERN_NO_SPACE;
2256 }
2257 shared_file_available_hash_ele--;
2258 entry->file_object = (int)file_object;
2259 entry->mapping_cnt = map_cnt;
2260 entry->mappings = NULL;
2261 entry->links.prev = (queue_entry_t) 0;
2262 entry->links.next = (queue_entry_t) 0;
2263 entry->regions_instance = (shared_region_mapping_t)sm_info->self;
2264 entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
2265 entry->file_offset = mappings[0].sfm_file_offset;
2266
2267 /* insert the new file entry in the hash table, for later lookups */
2268 lsf_hash_insert(entry, sm_info);
2269
2270 /* where we should add the next mapping description for that file */
2271 tptr = &(entry->mappings);
2272
2273 entry->base_address = base_offset;
2274 total_size = 0;
2275
2276 /* establish each requested mapping */
2277 for (i = 0; i < map_cnt; i++) {
2278 mach_vm_offset_t target_address;
2279 mach_vm_offset_t region_mask;
2280
2281 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
2282 region_handle = (ipc_port_t)sm_info->data_region;
2283 region_mask = SHARED_DATA_REGION_MASK;
2284 if ((((mappings[i].sfm_address + base_offset)
2285 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
2286 (((mappings[i].sfm_address + base_offset +
2287 mappings[i].sfm_size - 1)
2288 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
2289 SHARED_REGION_TRACE(
2290 SHARED_REGION_TRACE_ERROR,
2291 ("shared_region: %p: lsf_map: "
2292 "RW mapping #%d not in segment",
2293 current_thread(), i));
2294 shared_region_dump_mappings(
2295 SHARED_REGION_TRACE_ERROR,
2296 mappings, map_cnt, base_offset);
2297
2298 lsf_deallocate(entry,
2299 file_object,
2300 entry->base_address,
2301 sm_info,
2302 TRUE);
2303 return KERN_INVALID_ARGUMENT;
2304 }
2305 } else {
2306 region_mask = SHARED_TEXT_REGION_MASK;
2307 region_handle = (ipc_port_t)sm_info->text_region;
2308 if (((mappings[i].sfm_address + base_offset)
2309 & GLOBAL_SHARED_SEGMENT_MASK) ||
2310 ((mappings[i].sfm_address + base_offset +
2311 mappings[i].sfm_size - 1)
2312 & GLOBAL_SHARED_SEGMENT_MASK)) {
2313 SHARED_REGION_TRACE(
2314 SHARED_REGION_TRACE_ERROR,
2315 ("shared_region: %p: lsf_map: "
2316 "RO mapping #%d not in segment",
2317 current_thread(), i));
2318 shared_region_dump_mappings(
2319 SHARED_REGION_TRACE_ERROR,
2320 mappings, map_cnt, base_offset);
2321
2322 lsf_deallocate(entry,
2323 file_object,
2324 entry->base_address,
2325 sm_info,
2326 TRUE);
2327 return KERN_INVALID_ARGUMENT;
2328 }
2329 }
2330 if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
2331 ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
2332 (file_size))) {
2333 SHARED_REGION_TRACE(
2334 SHARED_REGION_TRACE_ERROR,
2335 ("shared_region: %p: lsf_map: "
2336 "ZF mapping #%d beyond EOF",
2337 current_thread(), i));
2338 shared_region_dump_mappings(SHARED_REGION_TRACE_ERROR,
2339 mappings, map_cnt,
2340 base_offset);
2341
2342
2343 lsf_deallocate(entry,
2344 file_object,
2345 entry->base_address,
2346 sm_info,
2347 TRUE);
2348 return KERN_INVALID_ARGUMENT;
2349 }
2350 target_address = entry->base_address +
2351 ((mappings[i].sfm_address) & region_mask);
2352 if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
2353 map_port = MACH_PORT_NULL;
2354 } else {
2355 map_port = (ipc_port_t) file_object->pager;
2356 }
2357 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2358
2359 total_size += mappings[i].sfm_size;
2360 if (mappings[i].sfm_size == 0) {
2361 /* nothing to map... */
2362 kr = KERN_SUCCESS;
2363 } else {
2364 kr = mach_vm_map(
2365 region_entry->backing.map,
2366 &target_address,
2367 vm_map_round_page(mappings[i].sfm_size),
2368 0,
2369 VM_FLAGS_FIXED,
2370 map_port,
2371 mappings[i].sfm_file_offset,
2372 TRUE,
2373 (mappings[i].sfm_init_prot &
2374 (VM_PROT_READ|VM_PROT_EXECUTE)),
2375 (mappings[i].sfm_max_prot &
2376 (VM_PROT_READ|VM_PROT_EXECUTE)),
2377 VM_INHERIT_DEFAULT);
2378 }
2379 if (kr != KERN_SUCCESS) {
2380 vm_offset_t old_base_address;
2381
2382 old_base_address = entry->base_address;
2383 lsf_deallocate(entry,
2384 file_object,
2385 entry->base_address,
2386 sm_info,
2387 TRUE);
2388 entry = NULL;
2389
2390 if (slide_p != NULL) {
2391 /*
2392 * Requested mapping failed but the caller
2393 * is OK with sliding the library in the
2394 * shared region, so let's try and slide it...
2395 */
2396
2397 SHARED_REGION_TRACE(
2398 SHARED_REGION_TRACE_CONFLICT,
2399 ("shared_region: %p: lsf_map: "
2400 "mapping #%d failed to map, "
2401 "kr=0x%x, sliding...\n",
2402 current_thread(), i, kr));
2403 shared_region_dump_mappings(
2404 SHARED_REGION_TRACE_INFO,
2405 mappings, map_cnt, base_offset);
2406 shared_region_dump_conflict_info(
2407 SHARED_REGION_TRACE_CONFLICT,
2408 region_entry->backing.map,
2409 (old_base_address +
2410 ((mappings[i].sfm_address)
2411 & region_mask)),
2412 vm_map_round_page(mappings[i].sfm_size));
2413
2414 /* lookup an appropriate spot */
2415 kr = lsf_slide(map_cnt, mappings,
2416 sm_info, &base_offset);
2417 if (kr == KERN_SUCCESS) {
2418 /* try and map it there ... */
2419 goto restart_after_slide;
2420 }
2421 /* couldn't slide ... */
2422 }
2423
2424 SHARED_REGION_TRACE(
2425 SHARED_REGION_TRACE_CONFLICT,
2426 ("shared_region: %p: lsf_map: "
2427 "mapping #%d failed to map, "
2428 "kr=0x%x, no sliding\n",
2429 current_thread(), i, kr));
2430 shared_region_dump_mappings(
2431 SHARED_REGION_TRACE_INFO,
2432 mappings, map_cnt, base_offset);
2433 shared_region_dump_conflict_info(
2434 SHARED_REGION_TRACE_CONFLICT,
2435 region_entry->backing.map,
2436 (old_base_address +
2437 ((mappings[i].sfm_address)
2438 & region_mask)),
2439 vm_map_round_page(mappings[i].sfm_size));
2440 return KERN_FAILURE;
2441 }
2442
2443 /* record this mapping */
2444 file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
2445 if (file_mapping == NULL) {
2446 lsf_deallocate(entry,
2447 file_object,
2448 entry->base_address,
2449 sm_info,
2450 TRUE);
2451 SHARED_REGION_TRACE(
2452 SHARED_REGION_TRACE_ERROR,
2453 ("shared_region: %p: "
2454 "lsf_map: unable to allocate mapping\n",
2455 current_thread()));
2456 return KERN_NO_SPACE;
2457 }
2458 shared_file_available_hash_ele--;
2459 file_mapping->mapping_offset = (mappings[i].sfm_address)
2460 & region_mask;
2461 file_mapping->size = mappings[i].sfm_size;
2462 file_mapping->file_offset = mappings[i].sfm_file_offset;
2463 file_mapping->protection = mappings[i].sfm_init_prot;
2464 file_mapping->next = NULL;
2465 LSF_DEBUG(("lsf_map: file_mapping %p "
2466 "for offset=0x%x size=0x%x\n",
2467 file_mapping, file_mapping->mapping_offset,
2468 file_mapping->size));
2469
2470 /* and link it to the file entry */
2471 *tptr = file_mapping;
2472
2473 /* where to put the next mapping's description */
2474 tptr = &(file_mapping->next);
2475 }
2476
2477 if (slide_p != NULL) {
2478 *slide_p = base_offset - original_base_offset;
2479 }
2480
2481 if ((sm_info->flags & SHARED_REGION_STANDALONE) ||
2482 (total_size == 0)) {
2483 /*
2484 * Two cases:
2485 * 1. we have a standalone and private shared region, so we
2486 * don't really need to keep the information about each file
2487 * and each mapping. Just deallocate it all.
2488 * 2. the total size of the mappings is 0, so nothing at all
2489 * was mapped. Let's not waste kernel resources to describe
2490 * nothing.
2491 *
2492 * XXX we still have the hash table, though...
2493 */
2494 lsf_deallocate(entry, file_object, entry->base_address, sm_info,
2495 FALSE);
2496 }
2497
2498 LSF_DEBUG(("lsf_map: done\n"));
2499 return KERN_SUCCESS;
2500}
2501
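/*
 * Editor's usage sketch (hypothetical addresses and sizes, not original
 * source): a caller of lsf_map() describes a split library with one
 * read/execute text mapping and one copy-on-write data mapping. The
 * VM_PROT_COW bit in sfm_init_prot is what routes a mapping to the data
 * submap in the code above.
 *
 *	struct shared_file_mapping_np m[2];
 *
 *	m[0].sfm_address = 0x90000000;		// text segment, read/execute
 *	m[0].sfm_size = 0x2000;
 *	m[0].sfm_file_offset = 0;
 *	m[0].sfm_max_prot = VM_PROT_READ | VM_PROT_EXECUTE;
 *	m[0].sfm_init_prot = VM_PROT_READ | VM_PROT_EXECUTE;
 *
 *	m[1].sfm_address = 0xa0000000;		// data segment, copy-on-write
 *	m[1].sfm_size = 0x1000;
 *	m[1].sfm_file_offset = 0x2000;
 *	m[1].sfm_max_prot = VM_PROT_READ | VM_PROT_WRITE;
 *	m[1].sfm_init_prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COW;
 */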
2502
2503/* finds the file_object extent list in the shared memory hash table */
2504/* If one is found the associated extents in shared memory are deallocated */
2505/* and the extent list is freed */
2506
2507static void
2508lsf_unload(
2509 void *file_object,
2510 vm_offset_t base_offset,
2511 shared_region_task_mappings_t sm_info)
2512{
2513 lsf_deallocate(NULL, file_object, base_offset, sm_info, TRUE);
2514}
2515
2516/*
2517 * lsf_deallocate:
2518 *
2519 * Deallocates all the "shared region" internal data structures describing
2520 * the file and its mappings.
2521 * Also deallocate the actual file mappings if requested ("unload" arg).
2522 */
2523static void
2524lsf_deallocate(
89b3af67 2525 load_struct_t *target_entry,
91447636
A
2526 void *file_object,
2527 vm_offset_t base_offset,
2528 shared_region_task_mappings_t sm_info,
2529 boolean_t unload)
2530{
2531 load_struct_t *entry;
2532 loaded_mapping_t *map_ele;
2533 loaded_mapping_t *back_ptr;
2534 kern_return_t kr;
2535
2536 LSF_DEBUG(("lsf_deallocate(target=%p,file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
2537 target_entry, file_object, base_offset, sm_info, unload));
2538 entry = lsf_hash_delete(target_entry,
2539 file_object,
2540 base_offset,
2541 sm_info);
2542 if (entry) {
2543 map_ele = entry->mappings;
2544 while(map_ele != NULL) {
2545 if (unload) {
2546 ipc_port_t region_handle;
2547 vm_named_entry_t region_entry;
2548
2549 if(map_ele->protection & VM_PROT_COW) {
2550 region_handle = (ipc_port_t)
2551 sm_info->data_region;
2552 } else {
2553 region_handle = (ipc_port_t)
2554 sm_info->text_region;
2555 }
2556 region_entry = (vm_named_entry_t)
2557 region_handle->ip_kobject;
2558
2559 kr = vm_deallocate(region_entry->backing.map,
2560 (entry->base_address +
2561 map_ele->mapping_offset),
2562 map_ele->size);
2563 assert(kr == KERN_SUCCESS);
2564 }
2565 back_ptr = map_ele;
2566 map_ele = map_ele->next;
2567 LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
2568 "offset 0x%x size 0x%x\n",
2569 back_ptr, back_ptr->mapping_offset,
2570 back_ptr->size));
2571 zfree(lsf_zone, back_ptr);
2572 shared_file_available_hash_ele++;
2573 }
2574 LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
2575 LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
2576 zfree(lsf_zone, entry);
2577 shared_file_available_hash_ele++;
2578 }
2579 LSF_DEBUG(("lsf_deallocate: done\n"));
2580}
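/*
 * Editor's sketch (generic pattern, not original source): the loop above
 * frees a singly-linked mapping list by advancing to "next" before freeing
 * the element it just left, so freed memory is never dereferenced:
 */
static void
free_mapping_list_sketch(loaded_mapping_t *head)
{
	loaded_mapping_t *cur, *prev;

	cur = head;
	while (cur != NULL) {
		prev = cur;
		cur = cur->next;	/* advance before freeing */
		zfree(lsf_zone, prev);
	}
}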
2581
2582/* returns an integer from 0 to 100: the percentage of the pool in use */
2583unsigned int
2584lsf_mapping_pool_gauge(void)
2585{
2586 return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
2587}
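/*
 * Editor's worked example (hypothetical numbers): with elem_size = 64
 * bytes, count = 1000 elements in use, and max_size = 1MB, the gauge
 * returns (1000 * 64 * 100) / 1048576 = 6, i.e. the pool is about 6%
 * full.
 */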