/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */

#include <debug.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)			\
	MACRO_BEGIN			\
	if (lsf_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#define LSF_ALLOC_DEBUG(args)		\
	MACRO_BEGIN			\
	if (lsf_alloc_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#else	/* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif	/* DEBUG */

/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t	size,
	ipc_port_t	*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock);


static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t	*hash_table,
	void		*file_object,
	vm_offset_t	recognizableOffset,
	int		size,
	boolean_t	regular,
	boolean_t	alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t	*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_load(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	sf_mapping_t	*mappings,
	int		map_cnt,
	void		*file_object,
	int		flags,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t	unload);


#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)

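/*
 * Example: with a file_object pointer of 0x12345678 and a hash table of
 * 0x1000 buckets, the low 24 bits give 0x345678 and 0x345678 % 0x1000
 * selects bucket 0x678.  Only the low bits are hashed, presumably
 * because kernel heap pointers share their high-order bits.
 */
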
/* Implementation */
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)


ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle32 = NULL;
ipc_port_t		com_region_handle64 = NULL;
vm_map_t		com_region_map32 = NULL;
vm_map_t		com_region_map64 = NULL;
vm_size_t		com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;


#if DEBUG
int			shared_region_debug = 0;
#endif /* DEBUG */

kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}

kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p)\n",
			     task, shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}

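/*
 * Note: neither accessor adjusts the region's reference count.  A
 * caller installing a region with vm_set_shared_region() is expected
 * to already hold a reference for it, as shared_file_boot_time_init()
 * does below.
 */
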
/*
 * shared_region_object_chain_detached:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}

/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just look up the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t		target_region,
	shared_region_mapping_t		object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p)\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if (target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}

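/*
 * Illustrative sketch (not compiled code): chaining a freshly created
 * private region to an existing one so lookups fall through to it;
 * "original" stands for any region the caller already references:
 *
 *	shared_region_mapping_t clone;
 *
 *	if (shared_file_create_system_region(&clone) == KERN_SUCCESS)
 *		(void) shared_region_object_chain_attach(clone, original);
 *
 * The attach fails with KERN_FAILURE if "clone" already has a chain.
 */
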
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if (*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failure\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = ENV_DEFAULT_ROOT;
	(*shared_region)->system = cpu_type();
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}

/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}

/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_set_alt_next(
	shared_region_mapping_t	shared_region,
	vm_offset_t		alt_next)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_set_alt_next"
			     "(shared_region=%p, alt_next=0x%x)\n",
			     shared_region, alt_next));
	assert(shared_region->ref_count > 0);
	shared_region->alternate_next = alt_next;
	return KERN_SUCCESS;
}

kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if (shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}

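/*
 * Reference-count discipline: a region is born with ref_count == 1 in
 * shared_region_mapping_create(), gains references through
 * shared_region_mapping_ref() and loses them through
 * shared_region_mapping_dealloc[_lock]() below.  The count is updated
 * with hw_atomic_add()/hw_atomic_sub(), so references can be taken and
 * dropped without holding the region lock.
 */
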
static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next = NULL;
	int					ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	/* Walk the chain of shadowed regions: drop one reference at each */
	/* level and free any region whose count reaches zero.            */
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if (shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if (((vm_named_entry_t)
			     (shared_region->text_region->ip_kobject))
			    ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if (shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if ((ref_count == 1) &&
			    (shared_region->flags & SHARED_REGION_SYSTEM) &&
			    !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}

/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}

static
kern_return_t
shared_region_object_create(
	vm_size_t	size,
	ipc_port_t	*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if (user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();


	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}

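/*
 * The handle produced above is a kernel-owned port whose kobject is a
 * vm_named_entry_t backed by a fresh submap.  Callers in this file
 * reach that submap through
 *
 *	((vm_named_entry_t) handle->ip_kobject)->backing.map
 *
 * as shared_com_boot_time_init() and shared_region_cleanup() do.
 */
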
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t	text_handle;
	ipc_port_t	data_handle;
	long		text_size;
	long		data_size;
	vm_offset_t	mapping_array;
	kern_return_t	kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size,
				&mapping_array);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle,
					    text_size, data_handle, data_size,
					    mapping_array,
					    GLOBAL_SHARED_TEXT_SEGMENT,
					    shared_region,
					    SHARED_ALTERNATE_LOAD_BASE,
					    SHARED_ALTERNATE_LOAD_BASE);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if (com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}

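/*
 * Illustrative sketch (not compiled code): giving a task a private
 * region in place of the system default; "task" is a placeholder:
 *
 *	shared_region_mapping_t private_region;
 *
 *	if (shared_file_create_system_region(&private_region) ==
 *	    KERN_SUCCESS)
 *		vm_set_shared_region(task, private_region);
 *
 * The new region already carries the single reference that the caller
 * of vm_set_shared_region() is expected to hold.
 */
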
/*
 * load a new default for a specified environment into the default shared
 * regions list.  If a previous default exists for the environment
 * specification it is returned along with its reference.  It is expected
 * that the new system region structure passes a reference.
 */

shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if ((old_system_region != NULL) &&
	    (old_system_region->fs_base == fs_base) &&
	    (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while (old_system_region->default_env_list != NULL) {
			if ((old_system_region->default_env_list->fs_base == fs_base) &&
			    (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t	tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry                           */
	if (old_system_region) {
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}

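/*
 * Note: default_environment_shared_regions is a singly linked list
 * chained through default_env_list, with at most one region per
 * (fs_base, system) environment pair.  A replaced entry is marked
 * SHARED_REGION_STALE so a later dealloc knows it has already been
 * unlinked from this list.
 */
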
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure.
 */

shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while (system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if ((system_region->fs_base == fs_base) &&
		    (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if (system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}

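/*
 * Minimal usage sketch (not compiled code): the caller owns the
 * returned reference and must drop it when done:
 *
 *	shared_region_mapping_t region;
 *
 *	region = lookup_default_shared_region(ENV_DEFAULT_ROOT,
 *					      cpu_type());
 *	if (region != NULL) {
 *		...
 *		shared_region_mapping_dealloc(region);
 *	}
 */
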
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */

__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t	old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if (old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock,
						   0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while (old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if (old_system_region->default_env_list == system_region) {
			shared_region_mapping_t	dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock,
							   0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}

void
remove_all_shared_regions(void)
{
	shared_region_mapping_t	system_region;
	shared_region_mapping_t	next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if (system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while (system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}

/*
 * shared_com_boot_time_init initializes the common page shared data and
 * text region.  This region is semi-independent of the split libs and so
 * its policies have to be handled differently by the code that manipulates
 * the mapping of shared region environments.  However, the shared region
 * delivery system supports both.
 */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if (com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if (com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code */
	if ((kret = shared_region_object_create(
		     com_region_size,
		     &com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
		return;
	}
	if ((kret = shared_region_object_create(
		     com_region_size,
		     &com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
		return;
	}

	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
					    com_region_size, NULL, 0, 0,
					    _COMM_PAGE_BASE_ADDRESS,
					    &com_mapping_resource,
					    0, 0);
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}

void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
			 text_region_size,
			 &shared_data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
				     text_region_size,
				     shared_data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE);

	new_system_region->fs_base = fs_base;
	new_system_region->system = system;
	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions.                                      */
	if (old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if (com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}

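/*
 * Reference flow in the function above: the new region starts with one
 * reference from shared_region_mapping_create(); a second is taken for
 * the caller before update_default_shared_region() installs it as the
 * default, and the displaced default (if any) is released via
 * shared_region_mapping_dealloc().
 */
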
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.                          */

static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	int			data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
		text_region_size,
		text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
		data_region_size,
		data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if (shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if (vm_map_find_space(kernel_map, &map_addr,
				      data_table_size, 0, &entry)
		    != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   TRUE);
		}


		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;


		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ,
				       &sfma_handle,
				       NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);

	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	kret = vm_map(((vm_named_entry_t)
		       (*data_region_handle)->ip_kobject)->backing.map,
		      &table_mapping_address,
		      data_table_size, 0,
		      SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
		      sfma_handle, 0, FALSE,
		      VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return kret;
}

A
1115static kern_return_t
1116shared_file_header_init(
1117 shared_file_info_t *shared_file_header)
1118{
1119 vm_size_t hash_table_size;
1120 vm_size_t hash_table_offset;
1121 int i;
1122 /* wire hash entry pool only as needed, since we are the only */
1123 /* users, we take a few liberties with the population of our */
1124 /* zone. */
1125 static int allocable_hash_pages;
1126 static vm_offset_t hash_cram_address;
1127
1128
1129 hash_table_size = shared_file_header->hash_size
1130 * sizeof (struct queue_entry);
1131 hash_table_offset = hash_table_size +
1132 round_page(sizeof (struct sf_mapping));
1133 for (i = 0; i < shared_file_header->hash_size; i++)
1134 queue_init(&shared_file_header->hash[i]);
1135
1136 allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
1137 / PAGE_SIZE);
1138 hash_cram_address = ((vm_offset_t) shared_file_header)
1139 + hash_table_offset;
1140 shared_file_available_hash_ele = 0;
1141
1142 shared_file_header->hash_init = TRUE;
1143
1144 if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
1145 int cram_pages, cram_size;
1146
1147 cram_pages = allocable_hash_pages > 3 ?
1148 3 : allocable_hash_pages;
1149 cram_size = cram_pages * PAGE_SIZE;
1150 if (vm_map_wire(kernel_map, hash_cram_address,
1151 hash_cram_address + cram_size,
1152 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1153 printf("shared_file_header_init: "
1154 "No memory for data table\n");
1155 return KERN_NO_SPACE;
1156 }
1157 allocable_hash_pages -= cram_pages;
1158 zcram(lsf_zone, (void *) hash_cram_address, cram_size);
1159 shared_file_available_hash_ele
1160 += cram_size/sizeof(struct load_file_ele);
1161 hash_cram_address += cram_size;
1162 }
1163
1164 return KERN_SUCCESS;
1165}
1166
1167
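/*
 * Cram arithmetic: each call wires at most 3 pages and hands them to
 * lsf_zone, so with 4 KB pages (an assumption; PAGE_SIZE is
 * architecture-dependent) one cram contributes
 * 12288 / sizeof(struct load_file_ele) elements to
 * shared_file_available_hash_ele.
 */
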
/* A call made from user space, copyin_shared_file requires the user to  */
/* provide the address and size of a mapped file, the full path name of  */
/* that file and a list of offsets to be mapped into shared memory.      */
/* By requiring that the file be pre-mapped, copyin_shared_file can      */
/* guarantee that the file is neither deleted nor changed after the user */
/* begins the call.                                                      */

kern_return_t
copyin_shared_file(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	int		map_cnt,
	sf_mapping_t	*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int		*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	SHARED_REGION_DEBUG(("copyin_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object                               */

	if (shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			mutex_unlock(&shared_file_header->lock);
			return ret;
		}
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object                                            */
	file_object = memory_object_control_to_vm_object(file_control);
	if (vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t mapped_object;
		if (entry->is_sub_map ||
		    entry->object.vm_object == VM_OBJECT_NULL) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while (mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed                       */
		if (file_object != mapped_object) {
			if (sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].file_offset,
				     shared_file_header->hash_size,
				     !alternate, alternate, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence.                                   */

		/* If the file is being loaded in the alternate        */
		/* area, one load to alternate is allowed per mapped   */
		/* object the base address is passed back to the       */
		/* caller and the mappings field is filled in.  If the */
		/* caller does not pass the precise mappings_cnt       */
		/* and the Alternate is already loaded, an error       */
		/* is returned.                                        */
		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].mapping_offset)
			     & SHARED_DATA_REGION_MASK) !=
			    file_mapping->mapping_offset ||
			    mappings[i].size !=
			    file_mapping->size ||
			    mappings[i].file_offset !=
			    file_mapping->file_offset ||
			    mappings[i].protection !=
			    file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if (i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
			+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
			       mappings, map_cnt,
			       (void *)file_object,
			       *flags, sm_info);
		*flags = 0;
		if (ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current     */
				/* regions, drop count on system_shared_region */
				/* and put back our original set               */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}

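/*
 * Note: copyin_shared_file() above is the older interface, built on
 * sf_mapping_t (file_offset/mapping_offset/size/protection).
 * map_shared_file() below is its successor, built on
 * struct shared_file_mapping_np (the sfm_* fields) and able to report
 * a slide through "slide_p" when the library is already loaded at a
 * different base.
 */
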
/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the
 * mappings are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object                               */

	if (shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object                                            */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence.                                   */

		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].sfm_address)
			     & SHARED_DATA_REGION_MASK) !=
			    file_mapping->mapping_offset ||
			    mappings[i].sfm_size != file_mapping->size ||
			    mappings[i].sfm_file_offset != file_mapping->file_offset ||
			    mappings[i].sfm_init_prot != file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if (i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * We fail.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
			} else {
				/*
				 * The file is already mapped at the correct
				 * address.
				 * We're done !
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_map(mappings, map_cnt,
			      (void *)file_control,
			      file_size,
			      sm_info,
			      base_offset,
			      slide_p);
		if (ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current     */
				/* regions, drop count on system_shared_region */
				/* and put back our original set               */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}

/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	kern_return_t		kr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int		sort_index, sorted_index;
	vm_map_offset_t		sort_min_address;
	unsigned int		sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	kr = KERN_SUCCESS;
	return kr;
}

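/*
 * Worked example (assuming client_base = GLOBAL_SHARED_TEXT_SEGMENT
 * and the 0x10000000 text/data sizes used elsewhere in this file):
 * with a single page-aligned range to keep at srr_address =
 * client_base + 0x1000, srr_size = 0x2000, the loop deallocates
 * [0, 0x1000) in the text submap and skips to offset 0x3000; the tail
 * code then deallocates [0x3000, text_size) plus the entire data
 * submap.
 */
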
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.  */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int)file_object) &&
		    (entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while(target_region) {
				if((!(sm_info->self)) ||
				   ((target_region == entry->regions_instance) &&
				    (target_region->depth >= entry->depth))) {
					if(alternate &&
					   entry->base_address >= sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "alt=%d found entry %p "
							   "(base=0x%x "
							   "alt_base=0x%x)\n",
							   alternate, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
					if (regular &&
					    entry->base_address < sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "reg=%d found entry %p "
							   "(base=0x%x "
							   "alt_base=0x%x)\n",
							   regular, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
				}
				if(target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p NOT FOUND\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));
	return (load_struct_t *)0;
}

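/*
 * Usage sketch (illustrative only, not a real call site): a caller
 * looking for an exact match already mapped in the regular
 * (non-sliding) area would do something like
 *
 *	entry = lsf_hash_lookup(shared_file_header->hash,
 *				file_object,
 *				mappings[0].file_offset,
 *				shared_file_header->hash_size,
 *				TRUE, FALSE, sm_info);
 *
 * "regular" matches entries below sm_info->alternate_base and
 * "alternate" matches entries at or above it; passing TRUE for both
 * accepts a hit in either area.
 */
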
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int				need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for(i = 0; i<shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mappings_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mappings_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance, region));
			}

			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_remove_regions_mappings_lock: done\n"));

	return NULL;	/* XXX */
}

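/*
 * Note: this routine is the teardown path for a whole region
 * instance.  It walks every hash bucket and lsf_unload()s each entry
 * whose regions_instance matches, which deallocates the submap
 * extents and frees the load_struct_t.  The return value is currently
 * always NULL (see the XXX above); callers must not rely on it.
 */
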
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}

/* Removes a map_list (list of loaded extents) for a file from */
/* the loaded file hash table.  */

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
		   file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
			sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete: not found\n"));
	return (load_struct_t *)0;
}

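/*
 * Note: lsf_hash_delete() only unlinks the entry from its bucket and
 * hands it back; the caller (lsf_deallocate()) is responsible for
 * walking the entry's mapping list and freeing everything back to
 * lsf_zone.
 */
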
/* Inserts a new map_list (list of loaded file extents) into the */
/* server loaded file hash table.  */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}

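/*
 * Lifetime sketch (illustrative only): entries inserted here are
 * found again by lsf_hash_lookup() and unlinked by lsf_hash_delete(),
 * all three hashing on the file object.  Assuming the
 * shared_file_header lock is held, the pattern is
 *
 *	entry = (load_struct_t *) zalloc(lsf_zone);
 *	entry->file_object = (int) file_object;
 *	entry->base_address = base_offset;
 *	lsf_hash_insert(entry, sm_info);
 *	...
 *	entry = lsf_hash_delete(file_object, base_offset, sm_info);
 *	zfree(lsf_zone, entry);
 */
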
/* Looks up the specified file.  If it is already loaded and the file */
/* extents are an exact match, returns success.  If it is not loaded, */
/* attempts to load the file extents at the given offsets.  If any */
/* extent fails to load, or if the file was already loaded in a */
/* different configuration, lsf_load fails.  */

static kern_return_t
lsf_load(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	sf_mapping_t	*mappings,
	int		map_cnt,
	void		*file_object,
	int		flags,
	shared_region_task_mappings_t sm_info)
{

	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	LSF_DEBUG(("lsf_load"
		   "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p)"
		   "\n",
		   mapped_file_size, *base_address, map_cnt, file_object,
		   flags, sm_info));
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_load: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_load"
		   "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p) "
		   "entry=%p\n",
		   mapped_file_size, *base_address, map_cnt, file_object,
		   flags, sm_info, entry));
	if (entry == NULL) {
		printf("lsf_load: unable to allocate memory\n");
		return KERN_NO_SPACE;
	}

	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].file_offset;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);

	alternate_load_next = sm_info->alternate_next;
	original_alt_load_next = alternate_load_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		vm_offset_t	max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
			sm_info->alternate_next;
		max_loadfile_offset = 0;
		for(i = 0; i<map_cnt; i++) {
			if(((mappings[i].mapping_offset
			     & SHARED_TEXT_REGION_MASK) + mappings[i].size) >
			   max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
					 & SHARED_TEXT_REGION_MASK)
					+ mappings[i].size;
			}
		}
		if((alternate_load_next + round_page(max_loadfile_offset)) >=
		   (sm_info->data_size - (sm_info->data_size>>9))) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);

			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page(max_loadfile_offset);

	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
		    sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	// Sanity check the mappings -- make sure we don't stray across the
	// alternate boundary.  If any bit of a library that we're not trying
	// to load in the alternate load space strays across that boundary,
	// return KERN_INVALID_ARGUMENT immediately so that the caller can
	// try to load it in the alternate shared area.  We do this to avoid
	// a nasty case: if a library tries to load so that it crosses the
	// boundary, it'll occupy a bit of the alternate load area without
	// the kernel being aware.  When loads into the alternate load area
	// at the first free address are tried, the load will fail.
	// Thus, a single library straddling the boundary causes all sliding
	// libraries to fail to load.  This check avoids that case.

	if (!(flags & ALTERNATE_LOAD_SITE)) {
		for (i = 0; i<map_cnt; i++) {
			vm_offset_t	region_mask;
			vm_address_t	region_start;
			vm_address_t	region_end;

			if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
				// mapping offsets are relative to start of shared segments.
				region_mask = SHARED_TEXT_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					// No library is permitted to load so that any part of it
					// lies in the alternate shared space.  If the caller wants
					// it loaded there, it can map it in the alternate space
					// explicitly.
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					lsf_unload(file_object, entry->base_address, sm_info);
					return KERN_INVALID_ARGUMENT;
				}
			} else {
				// rw section?
				region_mask = SHARED_DATA_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					lsf_unload(file_object, entry->base_address, sm_info);
					return KERN_INVALID_ARGUMENT;
				}
			} // write?
		} // for
	} // if not alternate load site.

	/* copyin mapped file data */
	for(i = 0; i<map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if(mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if((mappings[i].mapping_offset
			    & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if(mappings[i].mapping_offset
			   & GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if(!(mappings[i].protection & VM_PROT_ZF)
		   && ((mapped_file + mappings[i].file_offset +
			mappings[i].size) >
		       (mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
			+ entry->base_address;
		if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
			       ->backing.map, &target_address,
			       mappings[i].size, VM_FLAGS_FIXED)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
			+ entry->base_address;
		if(!(mappings[i].protection & VM_PROT_ZF)) {
			if(vm_map_copyin(current_map(),
					 (vm_map_address_t)(mapped_file + mappings[i].file_offset),
					 vm_map_round_page(mappings[i].size), FALSE, &copy_object)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					      ->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if(vm_map_copy_overwrite(((vm_named_entry_t)
						  local_map->ip_kobject)->backing.map,
						 (vm_map_address_t)target_address,
						 copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					      ->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}

		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_unload(file_object, entry->base_address, sm_info);
			printf("lsf_load: unable to allocate memory\n");
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
			& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_load: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* set the maximum protection, then the current protection */
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
			       ->backing.map, target_address,
			       round_page(target_address + mappings[i].size),
			       (mappings[i].protection &
				(VM_PROT_READ | VM_PROT_EXECUTE)),
			       TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
			       ->backing.map, target_address,
			       round_page(target_address + mappings[i].size),
			       (mappings[i].protection &
				(VM_PROT_READ | VM_PROT_EXECUTE)),
			       FALSE);

		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(
		(shared_region_mapping_t) sm_info->self,
		alternate_load_next);
	LSF_DEBUG(("lsf_load: done\n"));
	return KERN_SUCCESS;
}

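/*
 * Worked example for the ALTERNATE_LOAD_SITE placement above
 * (illustrative values, not from any real layout): with
 * sm_info->alternate_next = 0x09000000 and a library whose largest
 * extent ends at offset 0x00123000, the load lands at
 *
 *	((*base_address) & ~SHARED_TEXT_REGION_MASK) + 0x09000000
 *
 * and alternate_next advances by round_page(0x00123000).  The
 * "data_size - (data_size >> 9)" limit keeps roughly 1/512th of the
 * area in reserve; a load that would reach into it is refused with
 * KERN_NO_SPACE and rolled back via lsf_unload().
 */
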
/*
 * lsf_slide:
 *
 * Look in the shared region, starting from the end, for a place to fit all the
 * mappings while respecting their relative offsets.
 */
static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings_in,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p)
{
	mach_vm_offset_t	max_mapping_offset;
	int			i;
	vm_map_entry_t		map_entry, prev_entry, next_entry;
	mach_vm_offset_t	prev_hole_start, prev_hole_end;
	mach_vm_offset_t	mapping_offset, mapping_end_offset;
	mach_vm_offset_t	base_offset;
	mach_vm_size_t		mapping_size;
	mach_vm_offset_t	wiggle_room, wiggle;
	vm_map_t		text_map, data_map, map;
	vm_named_entry_t	region_entry;
	ipc_port_t		region_handle;
	kern_return_t		kr;

	struct shared_file_mapping_np	*mappings, tmp_mapping;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Sort the mappings array (a simple selection sort by each
	 * mapping's end offset), so that we can try and fit them in
	 * the right order as we progress along the VM maps.
	 *
	 * We can't modify the original array (the original order is
	 * important when doing lookups of the mappings), so copy it first.
	 */

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &mappings,
			(vm_size_t) (map_cnt * sizeof (mappings[0])));
	if (kr != KERN_SUCCESS) {
		return KERN_NO_SPACE;
	}

	bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));

	max_mapping_offset = 0;
	for (sorted_index = 0;
	     sorted_index < map_cnt;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		mapping_end_offset = ((mappings[sort_min_index].sfm_address &
				       SHARED_TEXT_REGION_MASK) +
				      mappings[sort_min_index].sfm_size);
		sort_min_address = mapping_end_offset;
		/* compute the highest mapping_offset as well... */
		if (mapping_end_offset > max_mapping_offset) {
			max_mapping_offset = mapping_end_offset;
		}
		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < map_cnt;
		     sort_index++) {

			mapping_end_offset =
				((mappings[sort_index].sfm_address &
				  SHARED_TEXT_REGION_MASK) +
				 mappings[sort_index].sfm_size);

			if (mapping_end_offset < sort_min_address) {
				/* lowest mapping_offset so far... */
				sort_min_index = sort_index;
				sort_min_address = mapping_end_offset;
			}
		}
		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_mapping = mappings[sort_min_index];
			mappings[sort_min_index] = mappings[sorted_index];
			mappings[sorted_index] = tmp_mapping;
		}

	}

	max_mapping_offset = vm_map_round_page(max_mapping_offset);

	/* start from the end of the shared area */
	base_offset = sm_info->text_size;

	/* can all the mappings fit ? */
	if (max_mapping_offset > base_offset) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
			  map_cnt * sizeof (mappings[0]));
		return KERN_FAILURE;
	}

	/*
	 * Align the last mapping to the end of the submaps
	 * and start from there.
	 */
	base_offset -= max_mapping_offset;

	region_handle = (ipc_port_t) sm_info->text_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_map = region_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_map = region_entry->backing.map;

	vm_map_lock_read(text_map);
	vm_map_lock_read(data_map);

start_over:
	/*
	 * At first, we can wiggle all the way from our starting point
	 * (base_offset) towards the start of the map (0), if needed.
	 */
	wiggle_room = base_offset;

	for (i = (signed) map_cnt - 1; i >= 0; i--) {
		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			/* copy-on-write mappings are in the data submap */
			map = data_map;
		} else {
			/* other mappings are in the text submap */
			map = text_map;
		}
		/* get the offset within the appropriate submap */
		mapping_offset = (mappings[i].sfm_address &
				  SHARED_TEXT_REGION_MASK);
		mapping_size = mappings[i].sfm_size;
		mapping_end_offset = mapping_offset + mapping_size;
		mapping_offset = vm_map_trunc_page(mapping_offset);
		mapping_end_offset = vm_map_round_page(mapping_end_offset);
		mapping_size = mapping_end_offset - mapping_offset;

		for (;;) {
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * mapped.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}

			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need.  Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there: wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;
		} /* while we look for a hole */
	} /* for each mapping */

	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}

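/*
 * Worked example of the wiggle logic above (illustrative numbers):
 * consider a single page-sized mapping (mapping_offset = 0,
 * mapping_size = 0x1000) with text_size = 0x10000000, so the search
 * starts at base_offset = 0x0FFFF000.  If the submap already holds an
 * entry covering [0x0FFFE000, 0x10000000), the lookup hits it and the
 * hole before it ends at 0x0FFFE000, so
 *
 *	wiggle = 0x0FFFF000 - 0x0FFFE000 + 0x1000 = 0x2000
 *
 * and base_offset slides down to 0x0FFFD000, placing the mapping at
 * [0x0FFFD000, 0x0FFFE000), flush against the existing entry.  A
 * later mapping that needs to wiggle more than the accumulated
 * wiggle_room restarts the whole fit from "start_over".
 */
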
/*
 * lsf_map:
 *
 * Attempt to establish the mappings for a split library into the shared region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;

	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object,
		   sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object,
		   sm_info, entry));
	if (entry == NULL) {
		printf("lsf_map: unable to allocate memory\n");
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;

	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     (file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);
		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}
		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		if (mach_vm_map(region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
			lsf_unload(file_object, entry->base_address, sm_info);

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					entry->base_address = base_offset;
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			return KERN_FAILURE;
		}

		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_unload(file_object, entry->base_address, sm_info);
			printf("lsf_map: unable to allocate memory\n");
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
			& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	}

	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if (sm_info->flags & SHARED_REGION_STANDALONE) {
		/*
		 * We have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping.  Just deallocate it all.
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}

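/*
 * Note on slide_p: when a fixed mapping fails and the caller passed a
 * non-NULL slide_p, lsf_map() retries at the spot found by lsf_slide()
 * and reports the displacement as
 *
 *	*slide_p = base_offset - original_base_offset;
 *
 * A NULL slide_p requests strict fixed-address semantics: any
 * collision is a hard KERN_FAILURE.
 */
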
/* Finds the file_object's extent list in the shared memory hash table. */
/* If one is found, the associated extents in shared memory are */
/* deallocated and the extent list is freed. */

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(file_object, base_offset, sm_info, TRUE);
}

/*
 * lsf_deallocate:
 *
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocates the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if (unload) {
				ipc_port_t		region_handle;
				vm_named_entry_t	region_entry;

				if(map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				vm_deallocate(region_entry->backing.map,
					      (entry->base_address +
					       map_ele->mapping_offset),
					      map_ele->size);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p\n", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_deallocate: done\n"));
}

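/*
 * Note on pool accounting: shared_file_available_hash_ele is
 * decremented once for every load_struct_t or loaded_mapping_t taken
 * from lsf_zone (see lsf_load() and lsf_map()) and incremented again
 * for every zfree() above, so it tracks how many elements the mapping
 * pool has left.
 */
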
/* returns an integer from 0 to 100 representing percent full */
unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
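
/*
 * Worked example (illustrative numbers): with elem_size = 64 bytes,
 * count = 4096 elements in use and max_size = 1048576 bytes, the
 * gauge reports (4096 * 64 * 100) / 1048576 = 25 (percent full).
 */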