1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 *
32 * File: vm/vm_shared_memory_server.c
33 * Author: Chris Youngworth
34 *
35 * Support routines for an in-kernel shared memory allocator
36 */
37
38 #include <debug.h>
39
40 #include <mach/mach_types.h>
41 #include <mach/kern_return.h>
42 #include <mach/vm_inherit.h>
43 #include <mach/vm_map.h>
44 #include <machine/cpu_capabilities.h>
45
46 #include <kern/kern_types.h>
47 #include <kern/ipc_kobject.h>
48 #include <kern/thread.h>
49 #include <kern/zalloc.h>
50 #include <kern/kalloc.h>
51
52 #include <ipc/ipc_types.h>
53 #include <ipc/ipc_port.h>
54
55 #include <vm/vm_kern.h>
56 #include <vm/vm_map.h>
57 #include <vm/vm_page.h>
58
59 #include <mach/mach_vm.h>
60 #include <mach/shared_memory_server.h>
61 #include <vm/vm_shared_memory_server.h>
62
63 #if DEBUG
64 int lsf_debug = 0;
65 int lsf_alloc_debug = 0;
66 #define LSF_DEBUG(args) \
67 MACRO_BEGIN \
68 if (lsf_debug) { \
69 kprintf args; \
70 } \
71 MACRO_END
72 #define LSF_ALLOC_DEBUG(args) \
73 MACRO_BEGIN \
74 if (lsf_alloc_debug) { \
75 kprintf args; \
76 } \
77 MACRO_END
78 #else /* DEBUG */
79 #define LSF_DEBUG(args)
80 #define LSF_ALLOC_DEBUG(args)
81 #endif /* DEBUG */
82
83 /* forward declarations */
84 static kern_return_t
85 shared_region_object_create(
86 vm_size_t size,
87 ipc_port_t *object_handle);
88
89 static kern_return_t
90 shared_region_mapping_dealloc_lock(
91 shared_region_mapping_t shared_region,
92 int need_sfh_lock,
93 int need_drl_lock);
94
95
96 static kern_return_t
97 shared_file_init(
98 ipc_port_t *text_region_handle,
99 vm_size_t text_region_size,
100 ipc_port_t *data_region_handle,
101 vm_size_t data_region_size,
102 vm_offset_t *file_mapping_array);
103
104 static kern_return_t
105 shared_file_header_init(
106 shared_file_info_t *shared_file_header);
107
108 static load_struct_t *
109 lsf_hash_lookup(
110 queue_head_t *hash_table,
111 void *file_object,
112 vm_offset_t recognizableOffset,
113 int size,
114 boolean_t regular,
115 boolean_t alternate,
116 shared_region_task_mappings_t sm_info);
117
118 static load_struct_t *
119 lsf_hash_delete(
120 void *file_object,
121 vm_offset_t base_offset,
122 shared_region_task_mappings_t sm_info);
123
124 static void
125 lsf_hash_insert(
126 load_struct_t *entry,
127 shared_region_task_mappings_t sm_info);
128
129 static kern_return_t
130 lsf_slide(
131 unsigned int map_cnt,
132 struct shared_file_mapping_np *mappings,
133 shared_region_task_mappings_t sm_info,
134 mach_vm_offset_t *base_offset_p);
135
136 static kern_return_t
137 lsf_map(
138 struct shared_file_mapping_np *mappings,
139 int map_cnt,
140 void *file_control,
141 memory_object_size_t file_size,
142 shared_region_task_mappings_t sm_info,
143 mach_vm_offset_t base_offset,
144 mach_vm_offset_t *slide_p);
145
146 static void
147 lsf_unload(
148 void *file_object,
149 vm_offset_t base_offset,
150 shared_region_task_mappings_t sm_info);
151
152 static void
153 lsf_deallocate(
154 void *file_object,
155 vm_offset_t base_offset,
156 shared_region_task_mappings_t sm_info,
157 boolean_t unload);
158
159
160 #define load_file_hash(file_object, size) \
161 ((((natural_t)file_object) & 0xffffff) % size)
162
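/*
 * Illustrative note (not part of the original source): load_file_hash()
 * masks the low 24 bits of the file_object pointer and takes the result
 * modulo the table size.  For example, assuming a hypothetical pointer
 * value of 0x12345678 and a table of 0x800 buckets:
 *
 *	load_file_hash((void *)0x12345678, 0x800)
 *		== (0x345678 % 0x800) == 0x678
 */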
163 /* Implementation */
164 vm_offset_t shared_file_text_region;
165 vm_offset_t shared_file_data_region;
166
167 ipc_port_t shared_text_region_handle;
168 ipc_port_t shared_data_region_handle;
169 vm_offset_t shared_file_mapping_array = 0;
170
171 shared_region_mapping_t default_environment_shared_regions = NULL;
172 static decl_mutex_data(,default_regions_list_lock_data)
173
174 #define default_regions_list_lock() \
175 mutex_lock(&default_regions_list_lock_data)
176 #define default_regions_list_lock_try() \
177 mutex_try(&default_regions_list_lock_data)
178 #define default_regions_list_unlock() \
179 mutex_unlock(&default_regions_list_lock_data)
180
181
182 ipc_port_t sfma_handle = NULL;
183 zone_t lsf_zone;
184
185 int shared_file_available_hash_ele;
186
187 /* com region support */
188 ipc_port_t com_region_handle32 = NULL;
189 ipc_port_t com_region_handle64 = NULL;
190 vm_map_t com_region_map32 = NULL;
191 vm_map_t com_region_map64 = NULL;
192 vm_size_t com_region_size = _COMM_PAGE_AREA_LENGTH;
193 shared_region_mapping_t com_mapping_resource = NULL;
194
195
196 #if DEBUG
197 int shared_region_debug = 0;
198 #endif /* DEBUG */
199
200
201 kern_return_t
202 vm_get_shared_region(
203 task_t task,
204 shared_region_mapping_t *shared_region)
205 {
206 *shared_region = (shared_region_mapping_t) task->system_shared_region;
207 if (*shared_region) {
208 assert((*shared_region)->ref_count > 0);
209 }
210 SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
211 task, *shared_region));
212 return KERN_SUCCESS;
213 }
214
215 kern_return_t
216 vm_set_shared_region(
217 task_t task,
218 shared_region_mapping_t shared_region)
219 {
220 SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
221 "shared_region=%p)\n",
222 task, shared_region));
223 if (shared_region) {
224 assert(shared_region->ref_count > 0);
225 }
226 task->system_shared_region = shared_region;
227 return KERN_SUCCESS;
228 }
229
230 /*
231 * shared_region_object_chain_detached:
232 *
233 * Mark the shared region as being detached or standalone. This means
234 * that we won't keep track of which file is mapped and how, for this shared
235 * region. And we don't have a "shadow" shared region.
236 * This is used when we clone a private shared region and we intend to remove
237 * some mappings from it. It won't need to maintain mappings info because it's
238 * now private. It can't have a "shadow" shared region because we don't want
239 * to see the shadow of the mappings we're about to remove.
240 */
241 void
242 shared_region_object_chain_detached(
243 shared_region_mapping_t target_region)
244 {
245 shared_region_mapping_lock(target_region);
246 target_region->flags |= SHARED_REGION_STANDALONE;
247 shared_region_mapping_unlock(target_region);
248 }
249
250 /*
251 * shared_region_object_chain_attach:
252 *
253 * Link "target_region" to "object_chain_region". "object_chain_region"
254 * is treated as a shadow of "target_region" for the purpose of looking up
255 * mappings. Since the "target_region" preserves all the mappings of the
256 * older "object_chain_region", we won't duplicate all the mappings info and
257 * we'll just look up the next region in the "object_chain" if we can't find
258 * what we're looking for in the "target_region". See lsf_hash_lookup().
259 */
260 kern_return_t
261 shared_region_object_chain_attach(
262 shared_region_mapping_t target_region,
263 shared_region_mapping_t object_chain_region)
264 {
265 shared_region_object_chain_t object_ele;
266
267 SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
268 "target_region=%p, object_chain_region=%p\n",
269 target_region, object_chain_region));
270 assert(target_region->ref_count > 0);
271 assert(object_chain_region->ref_count > 0);
272 if(target_region->object_chain)
273 return KERN_FAILURE;
274 object_ele = (shared_region_object_chain_t)
275 kalloc(sizeof (struct shared_region_object_chain));
if (object_ele == NULL)
return KERN_FAILURE;
276 shared_region_mapping_lock(object_chain_region);
277 target_region->object_chain = object_ele;
278 object_ele->object_chain_region = object_chain_region;
279 object_ele->next = object_chain_region->object_chain;
280 object_ele->depth = object_chain_region->depth;
281 object_chain_region->depth++;
282 target_region->alternate_next = object_chain_region->alternate_next;
283 shared_region_mapping_unlock(object_chain_region);
284 return KERN_SUCCESS;
285 }
286
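/*
 * Illustrative sketch (not part of the original source): a caller that has
 * cloned a shared region might chain the clone to the region it was copied
 * from, so that lsf_hash_lookup() can fall through to the older region's
 * mappings.  "new_region" and "old_region" are hypothetical
 * shared_region_mapping_t values, each held with a reference:
 *
 *	kr = shared_region_object_chain_attach(new_region, old_region);
 *	if (kr != KERN_SUCCESS) {
 *		// new_region already had an object chain
 *	}
 */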
287 /* LP64todo - need 64-bit safe version */
288 kern_return_t
289 shared_region_mapping_create(
290 ipc_port_t text_region,
291 vm_size_t text_size,
292 ipc_port_t data_region,
293 vm_size_t data_size,
294 vm_offset_t region_mappings,
295 vm_offset_t client_base,
296 shared_region_mapping_t *shared_region,
297 vm_offset_t alt_base,
298 vm_offset_t alt_next)
299 {
300 SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
301 *shared_region = (shared_region_mapping_t)
302 kalloc(sizeof (struct shared_region_mapping));
303 if(*shared_region == NULL) {
304 SHARED_REGION_DEBUG(("shared_region_mapping_create: "
305 "failure\n"));
306 return KERN_FAILURE;
307 }
308 shared_region_mapping_lock_init((*shared_region));
309 (*shared_region)->text_region = text_region;
310 (*shared_region)->text_size = text_size;
311 (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
312 (*shared_region)->system = cpu_type();
313 (*shared_region)->data_region = data_region;
314 (*shared_region)->data_size = data_size;
315 (*shared_region)->region_mappings = region_mappings;
316 (*shared_region)->client_base = client_base;
317 (*shared_region)->ref_count = 1;
318 (*shared_region)->next = NULL;
319 (*shared_region)->object_chain = NULL;
320 (*shared_region)->self = *shared_region;
321 (*shared_region)->flags = 0;
322 (*shared_region)->depth = 0;
323 (*shared_region)->default_env_list = NULL;
324 (*shared_region)->alternate_base = alt_base;
325 (*shared_region)->alternate_next = alt_next;
326 SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
327 *shared_region));
328 return KERN_SUCCESS;
329 }
330
331 /* LP64todo - need 64-bit safe version */
332 kern_return_t
333 shared_region_mapping_info(
334 shared_region_mapping_t shared_region,
335 ipc_port_t *text_region,
336 vm_size_t *text_size,
337 ipc_port_t *data_region,
338 vm_size_t *data_size,
339 vm_offset_t *region_mappings,
340 vm_offset_t *client_base,
341 vm_offset_t *alt_base,
342 vm_offset_t *alt_next,
343 unsigned int *fs_base,
344 unsigned int *system,
345 int *flags,
346 shared_region_mapping_t *next)
347 {
348 shared_region_mapping_lock(shared_region);
349
350 SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
351 shared_region));
352 assert(shared_region->ref_count > 0);
353 *text_region = shared_region->text_region;
354 *text_size = shared_region->text_size;
355 *data_region = shared_region->data_region;
356 *data_size = shared_region->data_size;
357 *region_mappings = shared_region->region_mappings;
358 *client_base = shared_region->client_base;
359 *alt_base = shared_region->alternate_base;
360 *alt_next = shared_region->alternate_next;
361 *flags = shared_region->flags;
362 *fs_base = shared_region->fs_base;
363 *system = shared_region->system;
364 *next = shared_region->next;
365
366 shared_region_mapping_unlock(shared_region);
return KERN_SUCCESS;
367 }
368
369 kern_return_t
370 shared_region_mapping_ref(
371 shared_region_mapping_t shared_region)
372 {
373 SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
374 "ref_count=%d + 1\n",
375 shared_region,
376 shared_region ? shared_region->ref_count : 0));
377 if(shared_region == NULL)
378 return KERN_SUCCESS;
379 assert(shared_region->ref_count > 0);
380 hw_atomic_add(&shared_region->ref_count, 1);
381 return KERN_SUCCESS;
382 }
383
384 static kern_return_t
385 shared_region_mapping_dealloc_lock(
386 shared_region_mapping_t shared_region,
387 int need_sfh_lock,
388 int need_drl_lock)
389 {
390 struct shared_region_task_mappings sm_info;
391 shared_region_mapping_t next = NULL;
392 int ref_count;
393
394 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
395 "(shared_region=%p,%d,%d) ref_count=%d\n",
396 shared_region, need_sfh_lock, need_drl_lock,
397 shared_region ? shared_region->ref_count : 0));
398 while (shared_region) {
399 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
400 "ref_count=%d\n",
401 shared_region, shared_region->ref_count));
402 assert(shared_region->ref_count > 0);
403 if ((ref_count =
404 hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
405 shared_region_mapping_lock(shared_region);
406
407 sm_info.text_region = shared_region->text_region;
408 sm_info.text_size = shared_region->text_size;
409 sm_info.data_region = shared_region->data_region;
410 sm_info.data_size = shared_region->data_size;
411 sm_info.region_mappings = shared_region->region_mappings;
412 sm_info.client_base = shared_region->client_base;
413 sm_info.alternate_base = shared_region->alternate_base;
414 sm_info.alternate_next = shared_region->alternate_next;
415 sm_info.flags = shared_region->flags;
416 sm_info.self = (vm_offset_t)shared_region;
417
418 if(shared_region->region_mappings) {
419 lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
420 }
421 if(((vm_named_entry_t)
422 (shared_region->text_region->ip_kobject))
423 ->backing.map->pmap) {
424 pmap_remove(((vm_named_entry_t)
425 (shared_region->text_region->ip_kobject))
426 ->backing.map->pmap,
427 sm_info.client_base,
428 sm_info.client_base + sm_info.text_size);
429 }
430 ipc_port_release_send(shared_region->text_region);
431 if(shared_region->data_region)
432 ipc_port_release_send(shared_region->data_region);
433 if (shared_region->object_chain) {
434 next = shared_region->object_chain->object_chain_region;
435 kfree(shared_region->object_chain,
436 sizeof (struct shared_region_object_chain));
437 } else {
438 next = NULL;
439 }
440 shared_region_mapping_unlock(shared_region);
441 SHARED_REGION_DEBUG(
442 ("shared_region_mapping_dealloc_lock(%p): "
443 "freeing\n",
444 shared_region));
445 bzero((void *)shared_region,
446 sizeof (*shared_region)); /* FBDP debug */
447 kfree(shared_region,
448 sizeof (struct shared_region_mapping));
449 shared_region = next;
450 } else {
451 /* Stale indicates that a system region is no */
452 /* longer in the default environment list. */
453 if((ref_count == 1) &&
454 (shared_region->flags & SHARED_REGION_SYSTEM)
455 && !(shared_region->flags & SHARED_REGION_STALE)) {
456 SHARED_REGION_DEBUG(
457 ("shared_region_mapping_dealloc_lock"
458 "(%p): removing stale\n",
459 shared_region));
460 remove_default_shared_region_lock(shared_region,need_sfh_lock, need_drl_lock);
461 }
462 break;
463 }
464 }
465 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
466 shared_region));
467 return KERN_SUCCESS;
468 }
469
470 /*
471 * Stub function; always indicates that the lock needs to be taken in the
472 * call to lsf_remove_regions_mappings_lock().
473 */
474 kern_return_t
475 shared_region_mapping_dealloc(
476 shared_region_mapping_t shared_region)
477 {
478 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
479 "(shared_region=%p)\n",
480 shared_region));
481 if (shared_region) {
482 assert(shared_region->ref_count > 0);
483 }
484 return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
485 }
486
487 static
488 kern_return_t
489 shared_region_object_create(
490 vm_size_t size,
491 ipc_port_t *object_handle)
492 {
493 vm_named_entry_t user_entry;
494 ipc_port_t user_handle;
495
496 ipc_port_t previous;
497 vm_map_t new_map;
498
499 user_entry = (vm_named_entry_t)
500 kalloc(sizeof (struct vm_named_entry));
501 if(user_entry == NULL) {
502 return KERN_FAILURE;
503 }
504 named_entry_lock_init(user_entry);
505 user_handle = ipc_port_alloc_kernel();
506
507
508 ip_lock(user_handle);
509
510 /* make a sonce right */
511 user_handle->ip_sorights++;
512 ip_reference(user_handle);
513
514 user_handle->ip_destination = IP_NULL;
515 user_handle->ip_receiver_name = MACH_PORT_NULL;
516 user_handle->ip_receiver = ipc_space_kernel;
517
518 /* make a send right */
519 user_handle->ip_mscount++;
520 user_handle->ip_srights++;
521 ip_reference(user_handle);
522
523 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
524 /* nsrequest unlocks user_handle */
525
526 /* Create a named object based on a submap of specified size */
527
528 new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
529 user_entry->backing.map = new_map;
530 user_entry->internal = TRUE;
531 user_entry->is_sub_map = TRUE;
532 user_entry->is_pager = FALSE;
533 user_entry->offset = 0;
534 user_entry->protection = VM_PROT_ALL;
535 user_entry->size = size;
536 user_entry->ref_count = 1;
537
538 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
539 IKOT_NAMED_ENTRY);
540 *object_handle = user_handle;
541 return KERN_SUCCESS;
542 }
543
544 /* called for the non-default, private branch shared region support */
545 /* the system default fields for fs_base and system are not */
546 /* relevant here, as the system default flag is not set */
547 kern_return_t
548 shared_file_create_system_region(
549 shared_region_mapping_t *shared_region)
550 {
551 ipc_port_t text_handle;
552 ipc_port_t data_handle;
553 long text_size;
554 long data_size;
555 vm_offset_t mapping_array;
556 kern_return_t kret;
557
558 SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));
559
560 text_size = 0x10000000;
561 data_size = 0x10000000;
562
563 kret = shared_file_init(&text_handle,
564 text_size, &data_handle, data_size, &mapping_array);
565 if(kret) {
566 SHARED_REGION_DEBUG(("shared_file_create_system_region: "
567 "shared_file_init failed kret=0x%x\n",
568 kret));
569 return kret;
570 }
571 kret = shared_region_mapping_create(text_handle,
572 text_size, data_handle, data_size, mapping_array,
573 GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
574 SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
575 if(kret) {
576 SHARED_REGION_DEBUG(("shared_file_create_system_region: "
577 "shared_region_mapping_create failed "
578 "kret=0x%x\n",
579 kret));
580 return kret;
581 }
582 (*shared_region)->flags = 0;
583 if(com_mapping_resource) {
584 shared_region_mapping_ref(com_mapping_resource);
585 (*shared_region)->next = com_mapping_resource;
586 }
587
588 SHARED_REGION_DEBUG(("shared_file_create_system_region() "
589 "-> shared_region=%p\n",
590 *shared_region));
591 return KERN_SUCCESS;
592 }
593
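/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller building a private (non-default) environment could pair the call
 * above with vm_set_shared_region():
 *
 *	shared_region_mapping_t private_region;
 *
 *	if (shared_file_create_system_region(&private_region) == KERN_SUCCESS)
 *		vm_set_shared_region(task, private_region);
 *
 * The new region is returned with a single reference and, if the comm page
 * resource exists, with com_mapping_resource already chained on "next".
 */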
594 /*
595 * load a new default for a specified environment into the default shared
596 * regions list. If a previous default exists for the environment specification,
597 * it is returned along with its reference. It is expected that the new
598 * system region structure passes a reference.
599 */
600
601 shared_region_mapping_t
602 update_default_shared_region(
603 shared_region_mapping_t new_system_region)
604 {
605 shared_region_mapping_t old_system_region;
606 unsigned int fs_base;
607 unsigned int system;
608
609 SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
610 new_system_region));
611 assert(new_system_region->ref_count > 0);
612 fs_base = new_system_region->fs_base;
613 system = new_system_region->system;
614 new_system_region->flags |= SHARED_REGION_SYSTEM;
615 default_regions_list_lock();
616 old_system_region = default_environment_shared_regions;
617
618 if((old_system_region != NULL) &&
619 (old_system_region->fs_base == fs_base) &&
620 (old_system_region->system == system)) {
621 new_system_region->default_env_list =
622 old_system_region->default_env_list;
623 old_system_region->default_env_list = NULL;
624 default_environment_shared_regions = new_system_region;
625 old_system_region->flags |= SHARED_REGION_STALE;
626 default_regions_list_unlock();
627 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
628 "old=%p stale 1\n",
629 new_system_region, old_system_region));
630 assert(old_system_region->ref_count > 0);
631 return old_system_region;
632 }
633 if (old_system_region) {
634 while(old_system_region->default_env_list != NULL) {
635 if((old_system_region->default_env_list->fs_base == fs_base) &&
636 (old_system_region->default_env_list->system == system)) {
637 shared_region_mapping_t tmp_system_region;
638
639 tmp_system_region =
640 old_system_region->default_env_list;
641 new_system_region->default_env_list =
642 tmp_system_region->default_env_list;
643 tmp_system_region->default_env_list = NULL;
644 old_system_region->default_env_list =
645 new_system_region;
646 old_system_region = tmp_system_region;
647 old_system_region->flags |= SHARED_REGION_STALE;
648 default_regions_list_unlock();
649 SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
650 ": old=%p stale 2\n",
651 new_system_region,
652 old_system_region));
653 assert(old_system_region->ref_count > 0);
654 return old_system_region;
655 }
656 old_system_region = old_system_region->default_env_list;
657 }
658 }
659 /* If we get here, we are at the end of the system list and we */
660 /* did not find a pre-existing entry */
661 if(old_system_region) {
662 SHARED_REGION_DEBUG(("update_default_system_region(%p): "
663 "adding after old=%p\n",
664 new_system_region, old_system_region));
665 assert(old_system_region->ref_count > 0);
666 old_system_region->default_env_list = new_system_region;
667 } else {
668 SHARED_REGION_DEBUG(("update_default_system_region(%p): "
669 "new default\n",
670 new_system_region));
671 default_environment_shared_regions = new_system_region;
672 }
673 assert(new_system_region->ref_count > 0);
674 default_regions_list_unlock();
675 return NULL;
676 }
677
678 /*
679 * lookup a system_shared_region for the environment specified. If one is
680 * found, it is returned along with a reference against the structure
681 */
682
683 shared_region_mapping_t
684 lookup_default_shared_region(
685 unsigned int fs_base,
686 unsigned int system)
687 {
688 shared_region_mapping_t system_region;
689 default_regions_list_lock();
690 system_region = default_environment_shared_regions;
691
692 SHARED_REGION_DEBUG(("lookup_default_shared_region"
693 "(base=0x%x, system=0x%x)\n",
694 fs_base, system));
695 while(system_region != NULL) {
696 SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
697 ": system_region=%p base=0x%x system=0x%x"
698 " ref_count=%d\n",
699 fs_base, system, system_region,
700 system_region->fs_base,
701 system_region->system,
702 system_region->ref_count));
703 assert(system_region->ref_count > 0);
704 if((system_region->fs_base == fs_base) &&
705 (system_region->system == system)) {
706 break;
707 }
708 system_region = system_region->default_env_list;
709 }
710 if(system_region)
711 shared_region_mapping_ref(system_region);
712 default_regions_list_unlock();
713 SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
714 fs_base, system, system_region));
715 return system_region;
716 }
717
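/*
 * Illustrative sketch (not part of the original source): the region returned
 * by lookup_default_shared_region() carries a reference taken on the
 * caller's behalf, so a hypothetical caller must eventually drop it:
 *
 *	system_region = lookup_default_shared_region(fs_base, system);
 *	if (system_region != NULL) {
 *		... use system_region ...
 *		shared_region_mapping_dealloc(system_region);
 *	}
 */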
718 /*
719 * remove a system_region default if it appears in the default regions list.
720 * Drop a reference on removal.
721 */
722
723 __private_extern__ void
724 remove_default_shared_region_lock(
725 shared_region_mapping_t system_region,
726 int need_sfh_lock,
727 int need_drl_lock)
728 {
729 shared_region_mapping_t old_system_region;
730
731 SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
732 "(system_region=%p, %d, %d)\n",
733 system_region, need_sfh_lock, need_drl_lock));
734 if (need_drl_lock) {
735 default_regions_list_lock();
736 }
737 old_system_region = default_environment_shared_regions;
738
739 if(old_system_region == NULL) {
740 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
741 "-> default_env=NULL\n",
742 system_region));
743 if (need_drl_lock) {
744 default_regions_list_unlock();
745 }
746 return;
747 }
748
749 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
750 "default_env=%p\n",
751 system_region, old_system_region));
752 assert(old_system_region->ref_count > 0);
753 if (old_system_region == system_region) {
754 default_environment_shared_regions
755 = old_system_region->default_env_list;
756 old_system_region->default_env_list = NULL;
757 old_system_region->flags |= SHARED_REGION_STALE;
758 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
759 "old=%p ref_count=%d STALE\n",
760 system_region, old_system_region,
761 old_system_region->ref_count));
762 shared_region_mapping_dealloc_lock(old_system_region,
763 need_sfh_lock,
764 0);
765 if (need_drl_lock) {
766 default_regions_list_unlock();
767 }
768 return;
769 }
770
771 while(old_system_region->default_env_list != NULL) {
772 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
773 "old=%p->default_env=%p\n",
774 system_region, old_system_region,
775 old_system_region->default_env_list));
776 assert(old_system_region->default_env_list->ref_count > 0);
777 if(old_system_region->default_env_list == system_region) {
778 shared_region_mapping_t dead_region;
779 dead_region = old_system_region->default_env_list;
780 old_system_region->default_env_list =
781 dead_region->default_env_list;
782 dead_region->default_env_list = NULL;
783 dead_region->flags |= SHARED_REGION_STALE;
784 SHARED_REGION_DEBUG(
785 ("remove_default_shared_region_lock(%p): "
786 "dead=%p ref_count=%d stale\n",
787 system_region, dead_region,
788 dead_region->ref_count));
789 shared_region_mapping_dealloc_lock(dead_region,
790 need_sfh_lock,
791 0);
792 if (need_drl_lock) {
793 default_regions_list_unlock();
794 }
795 return;
796 }
797 old_system_region = old_system_region->default_env_list;
798 }
799 if (need_drl_lock) {
800 default_regions_list_unlock();
801 }
802 }
803
804 /*
805 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
806 * the only caller. Remove this stub function and the corresponding symbol
807 * export for Merlot.
808 */
809 void
810 remove_default_shared_region(
811 shared_region_mapping_t system_region)
812 {
813 SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
814 system_region));
815 if (system_region) {
816 assert(system_region->ref_count > 0);
817 }
818 remove_default_shared_region_lock(system_region, 1, 1);
819 }
820
821 void
822 remove_all_shared_regions(void)
823 {
824 shared_region_mapping_t system_region;
825 shared_region_mapping_t next_system_region;
826
827 SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
828 LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
829 LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
830 default_regions_list_lock();
831 system_region = default_environment_shared_regions;
832
833 if(system_region == NULL) {
834 default_regions_list_unlock();
835 return;
836 }
837
838 while(system_region != NULL) {
839 next_system_region = system_region->default_env_list;
840 system_region->default_env_list = NULL;
841 system_region->flags |= SHARED_REGION_STALE;
842 SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
843 "%p ref_count=%d stale\n",
844 system_region, system_region->ref_count));
845 assert(system_region->ref_count > 0);
846 shared_region_mapping_dealloc_lock(system_region, 1, 0);
847 system_region = next_system_region;
848 }
849 default_environment_shared_regions = NULL;
850 default_regions_list_unlock();
851 SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
852 LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
853 LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
854 }
855
856 /* shared_com_boot_time_init initializes the common page shared data and */
857 /* text region. This region is semi-independent of the split libs */
858 /* and so its policies have to be handled differently by the code that */
859 /* manipulates the mapping of shared region environments. However, */
860 /* the shared region delivery system supports both */
861 void shared_com_boot_time_init(void); /* forward */
862 void
863 shared_com_boot_time_init(void)
864 {
865 kern_return_t kret;
866 vm_named_entry_t named_entry;
867
868 SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
869 if(com_region_handle32) {
870 panic("shared_com_boot_time_init: "
871 "com_region_handle32 already set\n");
872 }
873 if(com_region_handle64) {
874 panic("shared_com_boot_time_init: "
875 "com_region_handle64 already set\n");
876 }
877
878 /* create com page regions, 1 each for 32 and 64-bit code */
879 if((kret = shared_region_object_create(
880 com_region_size,
881 &com_region_handle32))) {
882 panic("shared_com_boot_time_init: "
883 "unable to create 32-bit comm page\n");
884 return;
885 }
886 if((kret = shared_region_object_create(
887 com_region_size,
888 &com_region_handle64))) {
889 panic("shared_com_boot_time_init: "
890 "unable to create 64-bit comm page\n");
891 return;
892 }
893
894 /* now export the underlying region/map */
895 named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
896 com_region_map32 = named_entry->backing.map;
897 named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
898 com_region_map64 = named_entry->backing.map;
899
900 /* wrap the com region in its own shared file mapping structure */
901 /* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
902 kret = shared_region_mapping_create(com_region_handle32,
903 com_region_size, NULL, 0, 0,
904 _COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
905 0, 0);
906 if (kret) {
907 panic("shared_region_mapping_create failed for commpage");
908 }
909 }
910
911 void
912 shared_file_boot_time_init(
913 unsigned int fs_base,
914 unsigned int system)
915 {
916 long text_region_size;
917 long data_region_size;
918 shared_region_mapping_t new_system_region;
919 shared_region_mapping_t old_default_env;
920
921 SHARED_REGION_DEBUG(("shared_file_boot_time_init"
922 "(base=0x%x,system=0x%x)\n",
923 fs_base, system));
924 text_region_size = 0x10000000;
925 data_region_size = 0x10000000;
926 shared_file_init(&shared_text_region_handle,
927 text_region_size,
928 &shared_data_region_handle,
929 data_region_size,
930 &shared_file_mapping_array);
931
932 shared_region_mapping_create(shared_text_region_handle,
933 text_region_size,
934 shared_data_region_handle,
935 data_region_size,
936 shared_file_mapping_array,
937 GLOBAL_SHARED_TEXT_SEGMENT,
938 &new_system_region,
939 SHARED_ALTERNATE_LOAD_BASE,
940 SHARED_ALTERNATE_LOAD_BASE);
941
942 new_system_region->fs_base = fs_base;
943 new_system_region->system = system;
944 new_system_region->flags = SHARED_REGION_SYSTEM;
945
946 /* grab an extra reference for the caller */
947 /* remember to grab before call to update */
948 shared_region_mapping_ref(new_system_region);
949 old_default_env = update_default_shared_region(new_system_region);
950 /* hold an extra reference because these are the system */
951 /* shared regions. */
952 if(old_default_env)
953 shared_region_mapping_dealloc(old_default_env);
954 if(com_mapping_resource == NULL) {
955 shared_com_boot_time_init();
956 }
957 shared_region_mapping_ref(com_mapping_resource);
958 new_system_region->next = com_mapping_resource;
959 vm_set_shared_region(current_task(), new_system_region);
960 SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
961 fs_base, system));
962 }
963
964
965 /* called at boot time, allocates two regions, each 256 megs in size. */
966 /* these regions are later mapped into task spaces, allowing tasks to */
967 /* share the contents of the regions. shared_file_init is part of */
968 /* a shared_memory_server which not only allocates the backing maps */
969 /* but also coordinates requests for space. */
970
971
972 static kern_return_t
973 shared_file_init(
974 ipc_port_t *text_region_handle,
975 vm_size_t text_region_size,
976 ipc_port_t *data_region_handle,
977 vm_size_t data_region_size,
978 vm_offset_t *file_mapping_array)
979 {
980 shared_file_info_t *sf_head;
981 vm_offset_t table_mapping_address;
982 int data_table_size;
983 int hash_size;
984 kern_return_t kret;
985
986 vm_object_t buf_object;
987 vm_map_entry_t entry;
988 vm_size_t alloced;
989 vm_offset_t b;
990 vm_page_t p;
991
992 SHARED_REGION_DEBUG(("shared_file_init()\n"));
993 /* create text and data maps/regions */
994 kret = shared_region_object_create(
995 text_region_size,
996 text_region_handle);
997 if (kret) {
998 return kret;
999 }
1000 kret = shared_region_object_create(
1001 data_region_size,
1002 data_region_handle);
1003 if (kret) {
1004 ipc_port_release_send(*text_region_handle);
1005 return kret;
1006 }
1007
1008 data_table_size = data_region_size >> 9;
1009 hash_size = data_region_size >> 14;
1010 table_mapping_address = data_region_size - data_table_size;
1011
1012 if(shared_file_mapping_array == 0) {
1013 vm_map_address_t map_addr;
1014 buf_object = vm_object_allocate(data_table_size);
1015
1016 if(vm_map_find_space(kernel_map, &map_addr,
1017 data_table_size, 0, &entry)
1018 != KERN_SUCCESS) {
1019 panic("shared_file_init: no space");
1020 }
1021 shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
1022 *file_mapping_array = shared_file_mapping_array;
1023 vm_map_unlock(kernel_map);
1024 entry->object.vm_object = buf_object;
1025 entry->offset = 0;
1026
1027 for (b = *file_mapping_array, alloced = 0;
1028 alloced < (hash_size +
1029 round_page(sizeof(struct sf_mapping)));
1030 alloced += PAGE_SIZE, b += PAGE_SIZE) {
1031 vm_object_lock(buf_object);
1032 p = vm_page_alloc(buf_object, alloced);
1033 if (p == VM_PAGE_NULL) {
1034 panic("shared_file_init: no space");
1035 }
1036 p->busy = FALSE;
1037 vm_object_unlock(buf_object);
1038 pmap_enter(kernel_pmap, b, p->phys_page,
1039 VM_PROT_READ | VM_PROT_WRITE,
1040 ((unsigned int)(p->object->wimg_bits))
1041 & VM_WIMG_MASK,
1042 TRUE);
1043 }
1044
1045
1046 /* initialize loaded file array */
1047 sf_head = (shared_file_info_t *)*file_mapping_array;
1048 sf_head->hash = (queue_head_t *)
1049 (((int)*file_mapping_array) +
1050 sizeof(struct shared_file_info));
1051 sf_head->hash_size = hash_size/sizeof(queue_head_t);
1052 mutex_init(&(sf_head->lock), 0);
1053 sf_head->hash_init = FALSE;
1054
1055
1056 mach_make_memory_entry(kernel_map, &data_table_size,
1057 *file_mapping_array, VM_PROT_READ, &sfma_handle,
1058 NULL);
1059
1060 if (vm_map_wire(kernel_map,
1061 vm_map_trunc_page(*file_mapping_array),
1062 vm_map_round_page(*file_mapping_array +
1063 hash_size +
1064 round_page(sizeof(struct sf_mapping))),
1065 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1066 panic("shared_file_init: No memory for data table");
1067 }
1068
1069 lsf_zone = zinit(sizeof(struct load_file_ele),
1070 data_table_size -
1071 (hash_size + round_page_32(sizeof(struct sf_mapping))),
1072 0, "load_file_server");
1073
1074 zone_change(lsf_zone, Z_EXHAUST, TRUE);
1075 zone_change(lsf_zone, Z_COLLECT, FALSE);
1076 zone_change(lsf_zone, Z_EXPAND, FALSE);
1077 zone_change(lsf_zone, Z_FOREIGN, TRUE);
1078
1079 /* initialize the global default environment lock */
1080 mutex_init(&default_regions_list_lock_data, 0);
1081
1082 } else {
1083 *file_mapping_array = shared_file_mapping_array;
1084 }
1085
1086 kret = vm_map(((vm_named_entry_t)
1087 (*data_region_handle)->ip_kobject)->backing.map,
1088 &table_mapping_address,
1089 data_table_size, 0,
1090 SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
1091 sfma_handle, 0, FALSE,
1092 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
1093
1094 SHARED_REGION_DEBUG(("shared_file_init() done\n"));
1095 return kret;
1096 }
1097
1098 static kern_return_t
1099 shared_file_header_init(
1100 shared_file_info_t *shared_file_header)
1101 {
1102 vm_size_t hash_table_size;
1103 vm_size_t hash_table_offset;
1104 int i;
1105 /* wire the hash entry pool only as needed; since we are the only */
1106 /* users, we take a few liberties with the population of our */
1107 /* zone. */
1108 static int allocable_hash_pages;
1109 static vm_offset_t hash_cram_address;
1110
1111
1112 hash_table_size = shared_file_header->hash_size
1113 * sizeof (struct queue_entry);
1114 hash_table_offset = hash_table_size +
1115 round_page(sizeof (struct sf_mapping));
1116 for (i = 0; i < shared_file_header->hash_size; i++)
1117 queue_init(&shared_file_header->hash[i]);
1118
1119 allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
1120 / PAGE_SIZE);
1121 hash_cram_address = ((vm_offset_t) shared_file_header)
1122 + hash_table_offset;
1123 shared_file_available_hash_ele = 0;
1124
1125 shared_file_header->hash_init = TRUE;
1126
1127 if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
1128 int cram_pages, cram_size;
1129
1130 cram_pages = allocable_hash_pages > 3 ?
1131 3 : allocable_hash_pages;
1132 cram_size = cram_pages * PAGE_SIZE;
1133 if (vm_map_wire(kernel_map, hash_cram_address,
1134 hash_cram_address + cram_size,
1135 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1136 printf("shared_file_header_init: "
1137 "No memory for data table\n");
1138 return KERN_NO_SPACE;
1139 }
1140 allocable_hash_pages -= cram_pages;
1141 zcram(lsf_zone, (void *) hash_cram_address, cram_size);
1142 shared_file_available_hash_ele
1143 += cram_size/sizeof(struct load_file_ele);
1144 hash_cram_address += cram_size;
1145 }
1146
1147 return KERN_SUCCESS;
1148 }
1149
1150
1151 /*
1152 * map_shared_file:
1153 *
1154 * Attempt to map a split library into the shared region. Check if the mappings
1155 * are already in place.
1156 */
1157 kern_return_t
1158 map_shared_file(
1159 int map_cnt,
1160 struct shared_file_mapping_np *mappings,
1161 memory_object_control_t file_control,
1162 memory_object_size_t file_size,
1163 shared_region_task_mappings_t sm_info,
1164 mach_vm_offset_t base_offset,
1165 mach_vm_offset_t *slide_p)
1166 {
1167 vm_object_t file_object;
1168 shared_file_info_t *shared_file_header;
1169 load_struct_t *file_entry;
1170 loaded_mapping_t *file_mapping;
1171 int i;
1172 kern_return_t ret;
1173 mach_vm_offset_t slide;
1174
1175 SHARED_REGION_DEBUG(("map_shared_file()\n"));
1176
1177 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1178
1179 mutex_lock(&shared_file_header->lock);
1180
1181 /* If this is the first call to this routine, take the opportunity */
1182 /* to initialize the hash table which will be used to look up */
1183 /* mappings based on the file object */
1184
1185 if(shared_file_header->hash_init == FALSE) {
1186 ret = shared_file_header_init(shared_file_header);
1187 if (ret != KERN_SUCCESS) {
1188 mutex_unlock(&shared_file_header->lock);
1189 return KERN_NO_SPACE;
1190 }
1191 }
1192
1193
1194 /* Find the entry in the map associated with the current mapping */
1195 /* of the file object */
1196 file_object = memory_object_control_to_vm_object(file_control);
1197
1198 file_entry = lsf_hash_lookup(shared_file_header->hash,
1199 (void *) file_object,
1200 mappings[0].sfm_file_offset,
1201 shared_file_header->hash_size,
1202 TRUE, TRUE, sm_info);
1203 if (file_entry) {
1204 /* File is loaded; check the load manifest for an exact match. */
1205 /* We simplify by requiring that the elements be the same */
1206 /* size and in the same order rather than checking for */
1207 /* semantic equivalence. */
1208
1209 i = 0;
1210 file_mapping = file_entry->mappings;
1211 while(file_mapping != NULL) {
1212 if(i>=map_cnt) {
1213 mutex_unlock(&shared_file_header->lock);
1214 return KERN_INVALID_ARGUMENT;
1215 }
1216 if(((mappings[i].sfm_address)
1217 & SHARED_DATA_REGION_MASK) !=
1218 file_mapping->mapping_offset ||
1219 mappings[i].sfm_size != file_mapping->size ||
1220 mappings[i].sfm_file_offset != file_mapping->file_offset ||
1221 mappings[i].sfm_init_prot != file_mapping->protection) {
1222 break;
1223 }
1224 file_mapping = file_mapping->next;
1225 i++;
1226 }
1227 if(i!=map_cnt) {
1228 mutex_unlock(&shared_file_header->lock);
1229 return KERN_INVALID_ARGUMENT;
1230 }
1231
1232 slide = file_entry->base_address - base_offset;
1233 if (slide_p != NULL) {
1234 /*
1235 * File already mapped but at different address,
1236 * and the caller is OK with the sliding.
1237 */
1238 *slide_p = slide;
1239 ret = KERN_SUCCESS;
1240 } else {
1241 /*
1242 * The caller doesn't want any sliding. The file needs
1243 * to be mapped at the requested address or not mapped.
1244 */
1245 if (slide != 0) {
1246 /*
1247 * The file is already mapped but at a different
1248 * address.
1249 * We fail.
1250 * XXX should we attempt to load at
1251 * requested address too ?
1252 */
1253 ret = KERN_FAILURE;
1254 } else {
1255 /*
1256 * The file is already mapped at the correct
1257 * address.
1258 * We're done !
1259 */
1260 ret = KERN_SUCCESS;
1261 }
1262 }
1263 mutex_unlock(&shared_file_header->lock);
1264 return ret;
1265 } else {
1266 /* File is not loaded; let's attempt to load it */
1267 ret = lsf_map(mappings, map_cnt,
1268 (void *)file_control,
1269 file_size,
1270 sm_info,
1271 base_offset,
1272 slide_p);
1273 if(ret == KERN_NO_SPACE) {
1274 shared_region_mapping_t regions;
1275 shared_region_mapping_t system_region;
1276 regions = (shared_region_mapping_t)sm_info->self;
1277 regions->flags |= SHARED_REGION_FULL;
1278 system_region = lookup_default_shared_region(
1279 regions->fs_base, regions->system);
1280 if (system_region == regions) {
1281 shared_region_mapping_t new_system_shared_region;
1282 shared_file_boot_time_init(
1283 regions->fs_base, regions->system);
1284 /* current task must stay with its current */
1285 /* regions, drop count on system_shared_region */
1286 /* and put back our original set */
1287 vm_get_shared_region(current_task(),
1288 &new_system_shared_region);
1289 shared_region_mapping_dealloc_lock(
1290 new_system_shared_region, 0, 1);
1291 vm_set_shared_region(current_task(), regions);
1292 } else if (system_region != NULL) {
1293 shared_region_mapping_dealloc_lock(
1294 system_region, 0, 1);
1295 }
1296 }
1297 mutex_unlock(&shared_file_header->lock);
1298 return ret;
1299 }
1300 }
1301
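/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller with a populated "mappings" array can allow sliding by passing a
 * non-NULL slide pointer:
 *
 *	mach_vm_offset_t slide;
 *
 *	kr = map_shared_file(map_cnt, mappings, file_control, file_size,
 *			     sm_info, base_offset, &slide);
 *	if (kr == KERN_SUCCESS && slide != 0) {
 *		// mappings landed "slide" bytes away from base_offset
 *	}
 *
 * Passing a NULL slide pointer instead makes the call fail if the file is
 * already mapped at a different base address.
 */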
1302 /*
1303 * shared_region_cleanup:
1304 *
1305 * Deallocates all the mappings in the shared region, except those explicitly
1306 * specified in the "ranges" set of address ranges.
1307 */
1308 kern_return_t
1309 shared_region_cleanup(
1310 unsigned int range_count,
1311 struct shared_region_range_np *ranges,
1312 shared_region_task_mappings_t sm_info)
1313 {
1314 kern_return_t kr;
1315 ipc_port_t region_handle;
1316 vm_named_entry_t region_named_entry;
1317 vm_map_t text_submap, data_submap, submap, next_submap;
1318 unsigned int i_range;
1319 vm_map_offset_t range_start, range_end;
1320 vm_map_offset_t submap_base, submap_end, submap_offset;
1321 vm_map_size_t delete_size;
1322
1323 struct shared_region_range_np tmp_range;
1324 unsigned int sort_index, sorted_index;
1325 vm_map_offset_t sort_min_address;
1326 unsigned int sort_min_index;
1327
1328 /*
1329 * Since we want to deallocate the holes between the "ranges",
1330 * sort the array by increasing addresses.
1331 */
1332 for (sorted_index = 0;
1333 sorted_index < range_count;
1334 sorted_index++) {
1335
1336 /* first remaining entry is our new starting point */
1337 sort_min_index = sorted_index;
1338 sort_min_address = ranges[sort_min_index].srr_address;
1339
1340 /* find the lowest mapping_offset in the remaining entries */
1341 for (sort_index = sorted_index + 1;
1342 sort_index < range_count;
1343 sort_index++) {
1344 if (ranges[sort_index].srr_address < sort_min_address) {
1345 /* lowest address so far... */
1346 sort_min_index = sort_index;
1347 sort_min_address =
1348 ranges[sort_min_index].srr_address;
1349 }
1350 }
1351
1352 if (sort_min_index != sorted_index) {
1353 /* swap entries */
1354 tmp_range = ranges[sort_min_index];
1355 ranges[sort_min_index] = ranges[sorted_index];
1356 ranges[sorted_index] = tmp_range;
1357 }
1358 }
1359
1360 region_handle = (ipc_port_t) sm_info->text_region;
1361 region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
1362 text_submap = region_named_entry->backing.map;
1363
1364 region_handle = (ipc_port_t) sm_info->data_region;
1365 region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
1366 data_submap = region_named_entry->backing.map;
1367
1368 submap = text_submap;
1369 next_submap = submap;
1370 submap_base = sm_info->client_base;
1371 submap_offset = 0;
1372 submap_end = submap_base + sm_info->text_size;
1373 for (i_range = 0;
1374 i_range < range_count;
1375 i_range++) {
1376
1377 /* get the next range of addresses to keep */
1378 range_start = ranges[i_range].srr_address;
1379 range_end = range_start + ranges[i_range].srr_size;
1380 /* align them to page boundaries */
1381 range_start = vm_map_trunc_page(range_start);
1382 range_end = vm_map_round_page(range_end);
1383
1384 /* make sure we don't go beyond the submap's boundaries */
1385 if (range_start < submap_base) {
1386 range_start = submap_base;
1387 } else if (range_start >= submap_end) {
1388 range_start = submap_end;
1389 }
1390 if (range_end < submap_base) {
1391 range_end = submap_base;
1392 } else if (range_end >= submap_end) {
1393 range_end = submap_end;
1394 }
1395
1396 if (range_start > submap_base + submap_offset) {
1397 /*
1398 * Deallocate everything between the last offset in the
1399 * submap and the start of this range.
1400 */
1401 delete_size = range_start -
1402 (submap_base + submap_offset);
1403 (void) vm_deallocate(submap,
1404 submap_offset,
1405 delete_size);
1406 } else {
1407 delete_size = 0;
1408 }
1409
1410 /* skip to the end of the range */
1411 submap_offset += delete_size + (range_end - range_start);
1412
1413 if (submap_base + submap_offset >= submap_end) {
1414 /* get to next submap */
1415
1416 if (submap == data_submap) {
1417 /* no other submap after data: done ! */
1418 break;
1419 }
1420
1421 /* get original range again */
1422 range_start = ranges[i_range].srr_address;
1423 range_end = range_start + ranges[i_range].srr_size;
1424 range_start = vm_map_trunc_page(range_start);
1425 range_end = vm_map_round_page(range_end);
1426
1427 if (range_end > submap_end) {
1428 /*
1429 * This last range overlaps with the next
1430 * submap. We need to process it again
1431 * after switching submaps. Otherwise, we'll
1432 * just continue with the next range.
1433 */
1434 i_range--;
1435 }
1436
1437 if (submap == text_submap) {
1438 /*
1439 * Switch to the data submap.
1440 */
1441 submap = data_submap;
1442 submap_offset = 0;
1443 submap_base = sm_info->client_base +
1444 sm_info->text_size;
1445 submap_end = submap_base + sm_info->data_size;
1446 }
1447 }
1448 }
1449
1450 if (submap_base + submap_offset < submap_end) {
1451 /* delete remainder of this submap, from "offset" to the end */
1452 (void) vm_deallocate(submap,
1453 submap_offset,
1454 submap_end - submap_base - submap_offset);
1455 /* if nothing to keep in data submap, delete it all */
1456 if (submap == text_submap) {
1457 submap = data_submap;
1458 submap_offset = 0;
1459 submap_base = sm_info->client_base + sm_info->text_size;
1460 submap_end = submap_base + sm_info->data_size;
1461 (void) vm_deallocate(data_submap,
1462 0,
1463 submap_end - submap_base);
1464 }
1465 }
1466
1467 kr = KERN_SUCCESS;
1468 return kr;
1469 }
1470
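/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller that wants to keep only two ranges of the region would pass them
 * in (in any order, since the routine sorts them) and everything else in
 * the text and data submaps is deallocated:
 *
 *	struct shared_region_range_np keep[2];
 *
 *	keep[0].srr_address = ...;	keep[0].srr_size = ...;
 *	keep[1].srr_address = ...;	keep[1].srr_size = ...;
 *	kr = shared_region_cleanup(2, keep, sm_info);
 */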
1471 /* A hash lookup function for the list of loaded files in */
1472 /* shared_memory_server space. */
1473
1474 static load_struct_t *
1475 lsf_hash_lookup(
1476 queue_head_t *hash_table,
1477 void *file_object,
1478 vm_offset_t recognizableOffset,
1479 int size,
1480 boolean_t regular,
1481 boolean_t alternate,
1482 shared_region_task_mappings_t sm_info)
1483 {
1484 register queue_t bucket;
1485 load_struct_t *entry;
1486 shared_region_mapping_t target_region;
1487 int depth;
1488
1489 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1490 "reg=%d alt=%d sm_info=%p\n",
1491 hash_table, file_object, recognizableOffset, size,
1492 regular, alternate, sm_info));
1493
1494 bucket = &(hash_table[load_file_hash((int)file_object, size)]);
1495 for (entry = (load_struct_t *)queue_first(bucket);
1496 !queue_end(bucket, &entry->links);
1497 entry = (load_struct_t *)queue_next(&entry->links)) {
1498
1499 if ((entry->file_object == (int)file_object) &&
1500 (entry->file_offset == recognizableOffset)) {
1501 target_region = (shared_region_mapping_t)sm_info->self;
1502 depth = target_region->depth;
1503 while(target_region) {
1504 if((!(sm_info->self)) ||
1505 ((target_region == entry->regions_instance) &&
1506 (target_region->depth >= entry->depth))) {
1507 if(alternate &&
1508 entry->base_address >= sm_info->alternate_base) {
1509 LSF_DEBUG(("lsf_hash_lookup: "
1510 "alt=%d found entry %p "
1511 "(base=0x%x "
1512 "alt_base=0x%x)\n",
1513 alternate, entry,
1514 entry->base_address,
1515 sm_info->alternate_base));
1516 return entry;
1517 }
1518 if (regular &&
1519 entry->base_address < sm_info->alternate_base) {
1520 LSF_DEBUG(("lsf_hash_lookup: "
1521 "reg=%d found entry %p "
1522 "(base=0x%x "
1523 "alt_base=0x%x)\n",
1524 regular, entry,
1525 entry->base_address,
1526 sm_info->alternate_base));
1527 return entry;
1528 }
1529 }
1530 if(target_region->object_chain) {
1531 target_region = (shared_region_mapping_t)
1532 target_region->object_chain->object_chain_region;
1533 depth = target_region->object_chain->depth;
1534 } else {
1535 target_region = NULL;
1536 }
1537 }
1538 }
1539 }
1540
1541 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1542 "reg=%d alt=%d sm_info=%p NOT FOUND\n",
1543 hash_table, file_object, recognizableOffset, size,
1544 regular, alternate, sm_info));
1545 return (load_struct_t *)0;
1546 }
1547
1548 __private_extern__ load_struct_t *
1549 lsf_remove_regions_mappings_lock(
1550 shared_region_mapping_t region,
1551 shared_region_task_mappings_t sm_info,
1552 int need_sfh_lock)
1553 {
1554 int i;
1555 register queue_t bucket;
1556 shared_file_info_t *shared_file_header;
1557 load_struct_t *entry;
1558 load_struct_t *next_entry;
1559
1560 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1561
1562 LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
1563 "sfh=%p\n",
1564 region, sm_info, shared_file_header));
1565 if (need_sfh_lock)
1566 mutex_lock(&shared_file_header->lock);
1567 if(shared_file_header->hash_init == FALSE) {
1568 if (need_sfh_lock)
1569 mutex_unlock(&shared_file_header->lock);
1570 LSF_DEBUG(("lsf_remove_regions_mappings_lock"
1571 "(region=%p,sm_info=%p): not inited\n",
1572 region, sm_info));
1573 return NULL;
1574 }
1575 for(i = 0; i<shared_file_header->hash_size; i++) {
1576 bucket = &shared_file_header->hash[i];
1577 for (entry = (load_struct_t *)queue_first(bucket);
1578 !queue_end(bucket, &entry->links);) {
1579 next_entry = (load_struct_t *)queue_next(&entry->links);
1580 if(region == entry->regions_instance) {
1581 LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
1582 "entry %p region %p: "
1583 "unloading\n",
1584 entry, region));
1585 lsf_unload((void *)entry->file_object,
1586 entry->base_address, sm_info);
1587 } else {
1588 LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
1589 "entry %p region %p target region %p: "
1590 "not unloading\n",
1591 entry, entry->regions_instance, region));
1592 }
1593
1594 entry = next_entry;
1595 }
1596 }
1597 if (need_sfh_lock)
1598 mutex_unlock(&shared_file_header->lock);
1599 LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));
1600
1601 return NULL; /* XXX */
1602 }
1603
1604 /*
1605 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
1606 * only caller. Remove this stub function and the corresponding symbol
1607 * export for Merlot.
1608 */
1609 load_struct_t *
1610 lsf_remove_regions_mappings(
1611 shared_region_mapping_t region,
1612 shared_region_task_mappings_t sm_info)
1613 {
1614 return lsf_remove_regions_mappings_lock(region, sm_info, 1);
1615 }
1616
1617 /* Removes a map_list (list of loaded extents) for a file from */
1618 /* the loaded file hash table. */
1619
1620 static load_struct_t *
1621 lsf_hash_delete(
1622 void *file_object,
1623 vm_offset_t base_offset,
1624 shared_region_task_mappings_t sm_info)
1625 {
1626 register queue_t bucket;
1627 shared_file_info_t *shared_file_header;
1628 load_struct_t *entry;
1629
1630 LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
1631 file_object, base_offset, sm_info));
1632
1633 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1634
1635 bucket = &shared_file_header->hash
1636 [load_file_hash((int)file_object, shared_file_header->hash_size)];
1637
1638 for (entry = (load_struct_t *)queue_first(bucket);
1639 !queue_end(bucket, &entry->links);
1640 entry = (load_struct_t *)queue_next(&entry->links)) {
1641 if((!(sm_info->self)) || ((shared_region_mapping_t)
1642 sm_info->self == entry->regions_instance)) {
1643 if ((entry->file_object == (int) file_object) &&
1644 (entry->base_address == base_offset)) {
1645 queue_remove(bucket, entry,
1646 load_struct_ptr_t, links);
1647 LSF_DEBUG(("lsf_hash_delete: found it\n"));
1648 return entry;
1649 }
1650 }
1651 }
1652
1653 LSF_DEBUG(("lsf_hash_delete; not found\n"));
1654 return (load_struct_t *)0;
1655 }
1656
1657 /* Inserts a new map_list (list of loaded file extents) into the */
1658 /* server loaded file hash table. */
1659
1660 static void
1661 lsf_hash_insert(
1662 load_struct_t *entry,
1663 shared_region_task_mappings_t sm_info)
1664 {
1665 shared_file_info_t *shared_file_header;
1666
1667 LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
1668 entry, sm_info, entry->file_object, entry->base_address));
1669
1670 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1671 queue_enter(&shared_file_header->hash
1672 [load_file_hash(entry->file_object,
1673 shared_file_header->hash_size)],
1674 entry, load_struct_ptr_t, links);
1675 }
1676
1677
1678
1679 /*
1680 * lsf_slide:
1681 *
1682 * Look in the shared region, starting from the end, for a place to fit all the
1683 * mappings while respecting their relative offsets.
1684 */
1685 static kern_return_t
1686 lsf_slide(
1687 unsigned int map_cnt,
1688 struct shared_file_mapping_np *mappings_in,
1689 shared_region_task_mappings_t sm_info,
1690 mach_vm_offset_t *base_offset_p)
1691 {
1692 mach_vm_offset_t max_mapping_offset;
1693 int i;
1694 vm_map_entry_t map_entry, prev_entry, next_entry;
1695 mach_vm_offset_t prev_hole_start, prev_hole_end;
1696 mach_vm_offset_t mapping_offset, mapping_end_offset;
1697 mach_vm_offset_t base_offset;
1698 mach_vm_size_t mapping_size;
1699 mach_vm_offset_t wiggle_room, wiggle;
1700 vm_map_t text_map, data_map, map;
1701 vm_named_entry_t region_entry;
1702 ipc_port_t region_handle;
1703 kern_return_t kr;
1704
1705 struct shared_file_mapping_np *mappings, tmp_mapping;
1706 unsigned int sort_index, sorted_index;
1707 vm_map_offset_t sort_min_address;
1708 unsigned int sort_min_index;
1709
1710 /*
1711 * Sort the mappings array, so that we can try and fit them
1712 * in the right order as we progress along the VM maps.
1713 *
1714 * We can't modify the original array (the original order is
1715 * important when doing lookups of the mappings), so copy it first.
1716 */
1717
1718 kr = kmem_alloc(kernel_map,
1719 (vm_offset_t *) &mappings,
1720 (vm_size_t) (map_cnt * sizeof (mappings[0])));
1721 if (kr != KERN_SUCCESS) {
1722 return KERN_NO_SPACE;
1723 }
1724
1725 bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));
1726
1727 max_mapping_offset = 0;
1728 for (sorted_index = 0;
1729 sorted_index < map_cnt;
1730 sorted_index++) {
1731
1732 /* first remaining entry is our new starting point */
1733 sort_min_index = sorted_index;
1734 mapping_end_offset = ((mappings[sort_min_index].sfm_address &
1735 SHARED_TEXT_REGION_MASK) +
1736 mappings[sort_min_index].sfm_size);
1737 sort_min_address = mapping_end_offset;
1738 /* compute the highest mapping_offset as well... */
1739 if (mapping_end_offset > max_mapping_offset) {
1740 max_mapping_offset = mapping_end_offset;
1741 }
1742 /* find the lowest mapping_offset in the remaining entries */
1743 for (sort_index = sorted_index + 1;
1744 sort_index < map_cnt;
1745 sort_index++) {
1746
1747 mapping_end_offset =
1748 ((mappings[sort_index].sfm_address &
1749 SHARED_TEXT_REGION_MASK) +
1750 mappings[sort_index].sfm_size);
1751
1752 if (mapping_end_offset < sort_min_address) {
1753 /* lowest mapping_offset so far... */
1754 sort_min_index = sort_index;
1755 sort_min_address = mapping_end_offset;
1756 }
1757 }
1758 if (sort_min_index != sorted_index) {
1759 /* swap entries */
1760 tmp_mapping = mappings[sort_min_index];
1761 mappings[sort_min_index] = mappings[sorted_index];
1762 mappings[sorted_index] = tmp_mapping;
1763 }
1764
1765 }
1766
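/*
 * Editorial worked example (made-up numbers): for three mappings whose
 * (sfm_address & SHARED_TEXT_REGION_MASK) + sfm_size come to 0x5000,
 * 0x2000 and 0x3000, the selection sort above leaves them ordered
 * 0x2000, 0x3000, 0x5000 and max_mapping_offset is 0x5000.  The
 * placement loop below then starts with the mapping that ends highest
 * and slides base_offset downwards as needed.
 */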
1767 max_mapping_offset = vm_map_round_page(max_mapping_offset);
1768
1769 /* start from the end of the shared area */
1770 base_offset = sm_info->text_size;
1771
1772 /* can all the mappings fit ? */
1773 if (max_mapping_offset > base_offset) {
1774 kmem_free(kernel_map,
1775 (vm_offset_t) mappings,
1776 map_cnt * sizeof (mappings[0]));
1777 return KERN_FAILURE;
1778 }
1779
1780 /*
1781 * Align the last mapping to the end of the submaps
1782 * and start from there.
1783 */
1784 base_offset -= max_mapping_offset;
1785
1786 region_handle = (ipc_port_t) sm_info->text_region;
1787 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
1788 text_map = region_entry->backing.map;
1789
1790 region_handle = (ipc_port_t) sm_info->data_region;
1791 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
1792 data_map = region_entry->backing.map;
1793
1794 vm_map_lock_read(text_map);
1795 vm_map_lock_read(data_map);
1796
1797 start_over:
1798 /*
1799 * At first, we can wiggle all the way from our starting point
1800 * (base_offset) towards the start of the map (0), if needed.
1801 */
1802 wiggle_room = base_offset;
1803
1804 for (i = (signed) map_cnt - 1; i >= 0; i--) {
1805 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
1806 /* copy-on-write mappings are in the data submap */
1807 map = data_map;
1808 } else {
1809 /* other mappings are in the text submap */
1810 map = text_map;
1811 }
1812 /* get the offset within the appropriate submap */
1813 mapping_offset = (mappings[i].sfm_address &
1814 SHARED_TEXT_REGION_MASK);
1815 mapping_size = mappings[i].sfm_size;
1816 mapping_end_offset = mapping_offset + mapping_size;
1817 mapping_offset = vm_map_trunc_page(mapping_offset);
1818 mapping_end_offset = vm_map_round_page(mapping_end_offset);
1819 mapping_size = mapping_end_offset - mapping_offset;
1820
1821 for (;;) {
1822 if (vm_map_lookup_entry(map,
1823 base_offset + mapping_offset,
1824 &map_entry)) {
1825 /*
1826 * The start address for that mapping
1827 * is already mapped: no fit.
1828 * Locate the hole immediately before this map
1829 * entry.
1830 */
1831 prev_hole_end = map_entry->vme_start;
1832 prev_entry = map_entry->vme_prev;
1833 if (prev_entry == vm_map_to_entry(map)) {
1834 /* no previous entry */
1835 prev_hole_start = map->min_offset;
1836 } else {
1837 /* previous entry ends here */
1838 prev_hole_start = prev_entry->vme_end;
1839 }
1840 } else {
1841 /*
1842 * The start address for that mapping is not
1843 * mapped.
1844 * Locate the start and end of the hole
1845 * at that location.
1846 */
1847 /* map_entry is the previous entry */
1848 if (map_entry == vm_map_to_entry(map)) {
1849 /* no previous entry */
1850 prev_hole_start = map->min_offset;
1851 } else {
1852 /* previous entry ends there */
1853 prev_hole_start = map_entry->vme_end;
1854 }
1855 next_entry = map_entry->vme_next;
1856 if (next_entry == vm_map_to_entry(map)) {
1857 /* no next entry */
1858 prev_hole_end = map->max_offset;
1859 } else {
1860 prev_hole_end = next_entry->vme_start;
1861 }
1862 }
1863
1864 if (prev_hole_end <= base_offset + mapping_offset) {
1865 /* hole is to our left: try and wiggle to fit */
1866 wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
1867 if (wiggle > base_offset) {
1868 /* we're getting out of the map */
1869 kr = KERN_FAILURE;
1870 goto done;
1871 }
1872 base_offset -= wiggle;
1873 if (wiggle > wiggle_room) {
1874 /* can't wiggle that much: start over */
1875 goto start_over;
1876 }
1877 /* account for the wiggling done */
1878 wiggle_room -= wiggle;
1879 }
1880
1881 if (prev_hole_end >
1882 base_offset + mapping_offset + mapping_size) {
1883 /*
1884 * The hole extends further to the right
1885 * than what we need. Ignore the extra space.
1886 */
1887 prev_hole_end = (base_offset + mapping_offset +
1888 mapping_size);
1889 }
1890
1891 if (prev_hole_end <
1892 base_offset + mapping_offset + mapping_size) {
1893 /*
1894 * The hole is not big enough to establish
1895 * the mapping right there: wiggle towards
1896 * the beginning of the hole so that the end
1897 * of our mapping fits in the hole...
1898 */
1899 wiggle = base_offset + mapping_offset
1900 + mapping_size - prev_hole_end;
1901 if (wiggle > base_offset) {
1902 /* we're getting out of the map */
1903 kr = KERN_FAILURE;
1904 goto done;
1905 }
1906 base_offset -= wiggle;
1907 if (wiggle > wiggle_room) {
1908 /* can't wiggle that much: start over */
1909 goto start_over;
1910 }
1911 /* account for the wiggling done */
1912 wiggle_room -= wiggle;
1913
1914 /* keep searching from this new base */
1915 continue;
1916 }
1917
1918 if (prev_hole_start > base_offset + mapping_offset) {
1919 /* no hole found: keep looking */
1920 continue;
1921 }
1922
1923 /* compute wiggling room at this hole */
1924 wiggle = base_offset + mapping_offset - prev_hole_start;
1925 if (wiggle < wiggle_room) {
1926 /* less wiggle room than before... */
1927 wiggle_room = wiggle;
1928 }
1929
1930 /* found a hole that fits: skip to next mapping */
1931 break;
1932 } /* while we look for a hole */
1933 } /* for each mapping */
1934
1935 *base_offset_p = base_offset;
1936 kr = KERN_SUCCESS;
1937
1938 done:
1939 vm_map_unlock_read(text_map);
1940 vm_map_unlock_read(data_map);
1941
1942 kmem_free(kernel_map,
1943 (vm_offset_t) mappings,
1944 map_cnt * sizeof (mappings[0]));
1945
1946 return kr;
1947 }
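/*
 * Editorial walk-through of lsf_slide(), with made-up numbers and under
 * the assumption of a 0x10000000-byte text submap: if sm_info->text_size
 * is 0x10000000 and the sorted mappings end at max_mapping_offset 0x3000,
 * base_offset starts at 0x0FFFD000.  Each mapping's page-aligned extent
 * is then looked up in the text or data submap; when the candidate spot
 * is occupied, or the hole there is too small, base_offset is reduced
 * ("wiggled") by just enough to fit, and the search restarts from
 * start_over whenever a wiggle exceeds the room left by mappings already
 * placed.  The function fails with KERN_FAILURE once a wiggle would push
 * base_offset below zero.
 */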
1948
1949 /*
1950 * lsf_map:
1951 *
1952 * Attempt to establish the mappings for a split library in the shared region.
1953 */
1954 static kern_return_t
1955 lsf_map(
1956 struct shared_file_mapping_np *mappings,
1957 int map_cnt,
1958 void *file_control,
1959 memory_object_offset_t file_size,
1960 shared_region_task_mappings_t sm_info,
1961 mach_vm_offset_t base_offset,
1962 mach_vm_offset_t *slide_p)
1963 {
1964 load_struct_t *entry;
1965 loaded_mapping_t *file_mapping;
1966 loaded_mapping_t **tptr;
1967 ipc_port_t region_handle;
1968 vm_named_entry_t region_entry;
1969 mach_port_t map_port;
1970 vm_object_t file_object;
1971 kern_return_t kr;
1972 int i;
1973 mach_vm_offset_t original_base_offset;
1974
1975 /* get the VM object from the file's memory object handle */
1976 file_object = memory_object_control_to_vm_object(file_control);
1977
1978 original_base_offset = base_offset;
1979
1980 LSF_DEBUG(("lsf_map"
1981 "(cnt=%d,file=%p,sm_info=%p)"
1982 "\n",
1983 map_cnt, file_object,
1984 sm_info));
1985
1986 restart_after_slide:
1987 /* get a new "load_struct_t" to describe the mappings for that file */
1988 entry = (load_struct_t *)zalloc(lsf_zone);
1989 LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
1990 LSF_DEBUG(("lsf_map"
1991 "(cnt=%d,file=%p,sm_info=%p) "
1992 "entry=%p\n",
1993 map_cnt, file_object,
1994 sm_info, entry));
1995 if (entry == NULL) {
1996 printf("lsf_map: unable to allocate memory\n");
1997 return KERN_NO_SPACE;
1998 }
1999 shared_file_available_hash_ele--;
2000 entry->file_object = (int)file_object;
2001 entry->mapping_cnt = map_cnt;
2002 entry->mappings = NULL;
2003 entry->links.prev = (queue_entry_t) 0;
2004 entry->links.next = (queue_entry_t) 0;
2005 entry->regions_instance = (shared_region_mapping_t)sm_info->self;
2006 entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
2007 entry->file_offset = mappings[0].sfm_file_offset;
2008
2009 /* insert the new file entry in the hash table, for later lookups */
2010 lsf_hash_insert(entry, sm_info);
2011
2012 /* where we should add the next mapping description for that file */
2013 tptr = &(entry->mappings);
2014
2015 entry->base_address = base_offset;
2016
2017
2018 /* establish each requested mapping */
2019 for (i = 0; i < map_cnt; i++) {
2020 mach_vm_offset_t target_address;
2021 mach_vm_offset_t region_mask;
2022
2023 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
2024 region_handle = (ipc_port_t)sm_info->data_region;
2025 region_mask = SHARED_DATA_REGION_MASK;
2026 if ((((mappings[i].sfm_address + base_offset)
2027 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
2028 (((mappings[i].sfm_address + base_offset +
2029 mappings[i].sfm_size - 1)
2030 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
2031 lsf_unload(file_object,
2032 entry->base_address, sm_info);
2033 return KERN_INVALID_ARGUMENT;
2034 }
2035 } else {
2036 region_mask = SHARED_TEXT_REGION_MASK;
2037 region_handle = (ipc_port_t)sm_info->text_region;
2038 if (((mappings[i].sfm_address + base_offset)
2039 & GLOBAL_SHARED_SEGMENT_MASK) ||
2040 ((mappings[i].sfm_address + base_offset +
2041 mappings[i].sfm_size - 1)
2042 & GLOBAL_SHARED_SEGMENT_MASK)) {
2043 lsf_unload(file_object,
2044 entry->base_address, sm_info);
2045 return KERN_INVALID_ARGUMENT;
2046 }
2047 }
2048 if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
2049 ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
2050 (file_size))) {
2051 lsf_unload(file_object, entry->base_address, sm_info);
2052 return KERN_INVALID_ARGUMENT;
2053 }
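/*
 * Editorial summary of the checks above: after applying the slide
 * (base_offset), both the first and last byte of a copy-on-write
 * mapping must fall in the data segment of the shared area (the
 * GLOBAL_SHARED_SEGMENT_MASK test yielding 0x10000000), while any
 * other mapping must fall entirely in the text segment (mask test
 * yielding 0); non-zero-fill mappings must also lie within the
 * backing file.  Any violation unloads what has been set up so far
 * and returns KERN_INVALID_ARGUMENT.
 */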
2054 target_address = entry->base_address +
2055 ((mappings[i].sfm_address) & region_mask);
2056 if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
2057 map_port = MACH_PORT_NULL;
2058 } else {
2059 map_port = (ipc_port_t) file_object->pager;
2060 }
2061 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2062
2063 if (mach_vm_map(region_entry->backing.map,
2064 &target_address,
2065 vm_map_round_page(mappings[i].sfm_size),
2066 0,
2067 VM_FLAGS_FIXED,
2068 map_port,
2069 mappings[i].sfm_file_offset,
2070 TRUE,
2071 (mappings[i].sfm_init_prot &
2072 (VM_PROT_READ|VM_PROT_EXECUTE)),
2073 (mappings[i].sfm_max_prot &
2074 (VM_PROT_READ|VM_PROT_EXECUTE)),
2075 VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
2076 lsf_unload(file_object, entry->base_address, sm_info);
2077
2078 if (slide_p != NULL) {
2079 /*
2080 * Requested mapping failed but the caller
2081 * is OK with sliding the library in the
2082 * shared region, so let's try and slide it...
2083 */
2084
2085 /* lookup an appropriate spot */
2086 kr = lsf_slide(map_cnt, mappings,
2087 sm_info, &base_offset);
2088 if (kr == KERN_SUCCESS) {
2089 /* try and map it there ... */
2090 entry->base_address = base_offset;
2091 goto restart_after_slide;
2092 }
2093 /* couldn't slide ... */
2094 }
2095
2096 return KERN_FAILURE;
2097 }
2098
2099 /* record this mapping */
2100 file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
2101 if (file_mapping == NULL) {
2102 lsf_unload(file_object, entry->base_address, sm_info);
2103 printf("lsf_map: unable to allocate memory\n");
2104 return KERN_NO_SPACE;
2105 }
2106 shared_file_available_hash_ele--;
2107 file_mapping->mapping_offset = (mappings[i].sfm_address)
2108 & region_mask;
2109 file_mapping->size = mappings[i].sfm_size;
2110 file_mapping->file_offset = mappings[i].sfm_file_offset;
2111 file_mapping->protection = mappings[i].sfm_init_prot;
2112 file_mapping->next = NULL;
2113 LSF_DEBUG(("lsf_map: file_mapping %p "
2114 "for offset=0x%x size=0x%x\n",
2115 file_mapping, file_mapping->mapping_offset,
2116 file_mapping->size));
2117
2118 /* and link it to the file entry */
2119 *tptr = file_mapping;
2120
2121 /* where to put the next mapping's description */
2122 tptr = &(file_mapping->next);
2123 }
2124
2125 if (slide_p != NULL) {
2126 *slide_p = base_offset - original_base_offset;
2127 }
2128
2129 if (sm_info->flags & SHARED_REGION_STANDALONE) {
2130 /*
2131 * We have a standalone and private shared region, so we
2132 * don't really need to keep the information about each file
2133 * and each mapping. Just deallocate it all.
2134 * XXX we still have the hash table, though...
2135 */
2136 lsf_deallocate(file_object, entry->base_address, sm_info,
2137 FALSE);
2138 }
2139
2140 LSF_DEBUG(("lsf_map: done\n"));
2141 return KERN_SUCCESS;
2142 }
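/*
 * Editorial note: when the caller passes a non-NULL slide_p, a
 * successful return reports in *slide_p how far the mappings were moved
 * from the requested base (base_offset - original_base_offset), so the
 * caller can relocate any addresses it derived from the original layout.
 */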
2143
2144
2145 /* finds the file_object extent list in the shared memory hash table */
2146 /* If one is found, the associated extents in shared memory are deallocated */
2147 /* and the extent list is freed */
2148
2149 static void
2150 lsf_unload(
2151 void *file_object,
2152 vm_offset_t base_offset,
2153 shared_region_task_mappings_t sm_info)
2154 {
2155 lsf_deallocate(file_object, base_offset, sm_info, TRUE);
2156 }
2157
2158 /*
2159 * lsf_deallocate:
2160 *
2161 * Deallocates all the "shared region" internal data structures describing
2162 * the file and its mappings.
2163 * Also deallocate the actual file mappings if requested ("unload" arg).
2164 */
2165 static void
2166 lsf_deallocate(
2167 void *file_object,
2168 vm_offset_t base_offset,
2169 shared_region_task_mappings_t sm_info,
2170 boolean_t unload)
2171 {
2172 load_struct_t *entry;
2173 loaded_mapping_t *map_ele;
2174 loaded_mapping_t *back_ptr;
2175
2176 LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
2177 file_object, base_offset, sm_info, unload));
2178 entry = lsf_hash_delete(file_object, base_offset, sm_info);
2179 if(entry) {
2180 map_ele = entry->mappings;
2181 while(map_ele != NULL) {
2182 if (unload) {
2183 ipc_port_t region_handle;
2184 vm_named_entry_t region_entry;
2185
2186 if(map_ele->protection & VM_PROT_COW) {
2187 region_handle = (ipc_port_t)
2188 sm_info->data_region;
2189 } else {
2190 region_handle = (ipc_port_t)
2191 sm_info->text_region;
2192 }
2193 region_entry = (vm_named_entry_t)
2194 region_handle->ip_kobject;
2195
2196 vm_deallocate(region_entry->backing.map,
2197 (entry->base_address +
2198 map_ele->mapping_offset),
2199 map_ele->size);
2200 }
2201 back_ptr = map_ele;
2202 map_ele = map_ele->next;
2203 LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
2204 "offset 0x%x size 0x%x\n",
2205 back_ptr, back_ptr->mapping_offset,
2206 back_ptr->size));
2207 zfree(lsf_zone, back_ptr);
2208 shared_file_available_hash_ele++;
2209 }
2210 LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
2211 LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p\n", entry));
2212 zfree(lsf_zone, entry);
2213 shared_file_available_hash_ele++;
2214 }
2215 LSF_DEBUG(("lsf_unload: done\n"));
2216 }
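/*
 * Editorial note: lsf_unload() is lsf_deallocate() with unload == TRUE,
 * i.e. the shared-region pages themselves are vm_deallocate()'d in
 * addition to the bookkeeping.  lsf_map() uses the unload == FALSE form
 * for standalone regions, where only the load_struct_t and its
 * loaded_mapping_t list are released.
 */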
2217
2218 /* integer is from 1 to 100 and represents percent full */
2219 unsigned int
2220 lsf_mapping_pool_gauge(void)
2221 {
2222 return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
2223 }
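/*
 * Illustrative (hypothetical) caller of the gauge, assuming a policy
 * that trims shared-region bookkeeping when lsf_zone is nearly full;
 * the threshold value is an assumption, not from this file:
 *
 *	if (lsf_mapping_pool_gauge() >= 75) {
 *		... reclaim stale load_struct_t entries ...
 *	}
 *
 * The return value is the zone's in-use bytes (count * elem_size)
 * expressed as a percentage of its configured max_size.
 */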