1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 *
30 * File: vm/vm_shared_memory_server.c
31 * Author: Chris Youngworth
32 *
33 * Support routines for an in-kernel shared memory allocator
34 */
35
36 #include <debug.h>
37
38 #include <mach/mach_types.h>
39 #include <mach/kern_return.h>
40 #include <mach/vm_inherit.h>
41 #include <mach/vm_map.h>
42 #include <machine/cpu_capabilities.h>
43
44 #include <kern/kern_types.h>
45 #include <kern/ipc_kobject.h>
46 #include <kern/thread.h>
47 #include <kern/zalloc.h>
48 #include <kern/kalloc.h>
49
50 #include <ipc/ipc_types.h>
51 #include <ipc/ipc_port.h>
52
53 #include <vm/vm_kern.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_page.h>
56
57 #include <mach/mach_vm.h>
58 #include <mach/shared_memory_server.h>
59 #include <vm/vm_shared_memory_server.h>
60
61 #if DEBUG
62 int lsf_debug = 0;
63 int lsf_alloc_debug = 0;
64 #define LSF_DEBUG(args) \
65 MACRO_BEGIN \
66 if (lsf_debug) { \
67 kprintf args; \
68 } \
69 MACRO_END
70 #define LSF_ALLOC_DEBUG(args) \
71 MACRO_BEGIN \
72 if (lsf_alloc_debug) { \
73 kprintf args; \
74 } \
75 MACRO_END
76 #else /* DEBUG */
77 #define LSF_DEBUG(args)
78 #define LSF_ALLOC_DEBUG(args)
79 #endif /* DEBUG */
80
81 /* forward declarations */
82 static kern_return_t
83 shared_region_object_create(
84 vm_size_t size,
85 ipc_port_t *object_handle);
86
87 static kern_return_t
88 shared_region_mapping_dealloc_lock(
89 shared_region_mapping_t shared_region,
90 int need_sfh_lock,
91 int need_drl_lock);
92
93
94 static kern_return_t
95 shared_file_init(
96 ipc_port_t *text_region_handle,
97 vm_size_t text_region_size,
98 ipc_port_t *data_region_handle,
99 vm_size_t data_region_size,
100 vm_offset_t *file_mapping_array);
101
102 static kern_return_t
103 shared_file_header_init(
104 shared_file_info_t *shared_file_header);
105
106 static load_struct_t *
107 lsf_hash_lookup(
108 queue_head_t *hash_table,
109 void *file_object,
110 vm_offset_t recognizableOffset,
111 int size,
112 boolean_t regular,
113 boolean_t alternate,
114 shared_region_task_mappings_t sm_info);
115
116 static load_struct_t *
117 lsf_hash_delete(
118 void *file_object,
119 vm_offset_t base_offset,
120 shared_region_task_mappings_t sm_info);
121
122 static void
123 lsf_hash_insert(
124 load_struct_t *entry,
125 shared_region_task_mappings_t sm_info);
126
127 static kern_return_t
128 lsf_slide(
129 unsigned int map_cnt,
130 struct shared_file_mapping_np *mappings,
131 shared_region_task_mappings_t sm_info,
132 mach_vm_offset_t *base_offset_p);
133
134 static kern_return_t
135 lsf_map(
136 struct shared_file_mapping_np *mappings,
137 int map_cnt,
138 void *file_control,
139 memory_object_size_t file_size,
140 shared_region_task_mappings_t sm_info,
141 mach_vm_offset_t base_offset,
142 mach_vm_offset_t *slide_p);
143
144 static void
145 lsf_unload(
146 void *file_object,
147 vm_offset_t base_offset,
148 shared_region_task_mappings_t sm_info);
149
150 static void
151 lsf_deallocate(
152 void *file_object,
153 vm_offset_t base_offset,
154 shared_region_task_mappings_t sm_info,
155 boolean_t unload);
156
157
158 #define load_file_hash(file_object, size) \
159 ((((natural_t)file_object) & 0xffffff) % size)
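/*
 * The hash key is the low 24 bits of the file's VM object pointer, reduced
 * modulo the bucket count.  As a worked example, a hypothetical file_object
 * pointer of 0x01234560 with a 256-bucket table hashes to
 * (0x01234560 & 0xffffff) % 256 == 0x234560 % 256 == 0x60, i.e. bucket 96.
 * lsf_hash_lookup(), lsf_hash_delete() and lsf_hash_insert() all use this
 * macro, so they must agree on the table size passed in.
 */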
160
161 /* Implementation */
162 vm_offset_t shared_file_text_region;
163 vm_offset_t shared_file_data_region;
164
165 ipc_port_t shared_text_region_handle;
166 ipc_port_t shared_data_region_handle;
167 vm_offset_t shared_file_mapping_array = 0;
168
169 shared_region_mapping_t default_environment_shared_regions = NULL;
170 static decl_mutex_data(,default_regions_list_lock_data)
171
172 #define default_regions_list_lock() \
173 mutex_lock(&default_regions_list_lock_data)
174 #define default_regions_list_lock_try() \
175 mutex_try(&default_regions_list_lock_data)
176 #define default_regions_list_unlock() \
177 mutex_unlock(&default_regions_list_lock_data)
178
179
180 ipc_port_t sfma_handle = NULL;
181 zone_t lsf_zone;
182
183 int shared_file_available_hash_ele;
184
185 /* com region support */
186 ipc_port_t com_region_handle32 = NULL;
187 ipc_port_t com_region_handle64 = NULL;
188 vm_map_t com_region_map32 = NULL;
189 vm_map_t com_region_map64 = NULL;
190 vm_size_t com_region_size = _COMM_PAGE_AREA_LENGTH;
191 shared_region_mapping_t com_mapping_resource = NULL;
192
193
194 #if DEBUG
195 int shared_region_debug = 0;
196 #endif /* DEBUG */
197
198
199 kern_return_t
200 vm_get_shared_region(
201 task_t task,
202 shared_region_mapping_t *shared_region)
203 {
204 *shared_region = (shared_region_mapping_t) task->system_shared_region;
205 if (*shared_region) {
206 assert((*shared_region)->ref_count > 0);
207 }
208 SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
209 task, *shared_region));
210 return KERN_SUCCESS;
211 }
212
213 kern_return_t
214 vm_set_shared_region(
215 task_t task,
216 shared_region_mapping_t shared_region)
217 {
218 SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
219 "shared_region=%p)\n",
220 task, shared_region));
221 if (shared_region) {
222 assert(shared_region->ref_count > 0);
223 }
224 task->system_shared_region = shared_region;
225 return KERN_SUCCESS;
226 }
227
228 /*
229 * shared_region_object_chain_detach:
230 *
231 * Mark the shared region as being detached or standalone. This means
232 * that we won't keep track of which file is mapped and how, for this shared
233 * region. And we don't have a "shadow" shared region.
234 * This is used when we clone a private shared region and we intend to remove
235 * some mappings from it. It won't need to maintain mappings info because it's
236 * now private. It can't have a "shadow" shared region because we don't want
237 * to see the shadow of the mappings we're about to remove.
238 */
239 void
240 shared_region_object_chain_detached(
241 shared_region_mapping_t target_region)
242 {
243 shared_region_mapping_lock(target_region);
244 target_region->flags |= SHARED_REGION_STANDALONE;
245 shared_region_mapping_unlock(target_region);
246 }
247
248 /*
249 * shared_region_object_chain_attach:
250 *
251 * Link "target_region" to "object_chain_region". "object_chain_region"
252 * is treated as a shadow of "target_region" for the purpose of looking up
253 * mappings. Since the "target_region" preserves all the mappings of the
254 * older "object_chain_region", we won't duplicate all the mappings info and
255 * we'll just lookup the next region in the "object_chain" if we can't find
256 * what we're looking for in the "target_region". See lsf_hash_lookup().
257 */
258 kern_return_t
259 shared_region_object_chain_attach(
260 shared_region_mapping_t target_region,
261 shared_region_mapping_t object_chain_region)
262 {
263 shared_region_object_chain_t object_ele;
264
265 SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
266 "target_region=%p, object_chain_region=%p\n",
267 target_region, object_chain_region));
268 assert(target_region->ref_count > 0);
269 assert(object_chain_region->ref_count > 0);
270 if(target_region->object_chain)
271 return KERN_FAILURE;
272 object_ele = (shared_region_object_chain_t)
273 kalloc(sizeof (struct shared_region_object_chain));
if(object_ele == NULL)
return KERN_FAILURE;
274 shared_region_mapping_lock(object_chain_region);
275 target_region->object_chain = object_ele;
276 object_ele->object_chain_region = object_chain_region;
277 object_ele->next = object_chain_region->object_chain;
278 object_ele->depth = object_chain_region->depth;
279 object_chain_region->depth++;
280 target_region->alternate_next = object_chain_region->alternate_next;
281 shared_region_mapping_unlock(object_chain_region);
282 return KERN_SUCCESS;
283 }
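/*
 * A minimal sketch of the resulting linkage, assuming a freshly cloned
 * region "T" is attached to an existing region "O":
 *
 * T->object_chain->object_chain_region == O
 * T->object_chain->depth == O's depth before the attach
 * O->depth is incremented by one
 * T->alternate_next == O->alternate_next
 *
 * lsf_hash_lookup() follows this chain to fall back to "O"'s mappings when a
 * file is not found in "T" itself.
 */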
284
285 /* LP64todo - need 64-bit safe version */
286 kern_return_t
287 shared_region_mapping_create(
288 ipc_port_t text_region,
289 vm_size_t text_size,
290 ipc_port_t data_region,
291 vm_size_t data_size,
292 vm_offset_t region_mappings,
293 vm_offset_t client_base,
294 shared_region_mapping_t *shared_region,
295 vm_offset_t alt_base,
296 vm_offset_t alt_next)
297 {
298 SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
299 *shared_region = (shared_region_mapping_t)
300 kalloc(sizeof (struct shared_region_mapping));
301 if(*shared_region == NULL) {
302 SHARED_REGION_DEBUG(("shared_region_mapping_create: "
303 "failure\n"));
304 return KERN_FAILURE;
305 }
306 shared_region_mapping_lock_init((*shared_region));
307 (*shared_region)->text_region = text_region;
308 (*shared_region)->text_size = text_size;
309 (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
310 (*shared_region)->system = cpu_type();
311 (*shared_region)->data_region = data_region;
312 (*shared_region)->data_size = data_size;
313 (*shared_region)->region_mappings = region_mappings;
314 (*shared_region)->client_base = client_base;
315 (*shared_region)->ref_count = 1;
316 (*shared_region)->next = NULL;
317 (*shared_region)->object_chain = NULL;
318 (*shared_region)->self = *shared_region;
319 (*shared_region)->flags = 0;
320 (*shared_region)->depth = 0;
321 (*shared_region)->default_env_list = NULL;
322 (*shared_region)->alternate_base = alt_base;
323 (*shared_region)->alternate_next = alt_next;
324 SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
325 *shared_region));
326 return KERN_SUCCESS;
327 }
328
329 /* LP64todo - need 64-bit safe version */
330 kern_return_t
331 shared_region_mapping_info(
332 shared_region_mapping_t shared_region,
333 ipc_port_t *text_region,
334 vm_size_t *text_size,
335 ipc_port_t *data_region,
336 vm_size_t *data_size,
337 vm_offset_t *region_mappings,
338 vm_offset_t *client_base,
339 vm_offset_t *alt_base,
340 vm_offset_t *alt_next,
341 unsigned int *fs_base,
342 unsigned int *system,
343 int *flags,
344 shared_region_mapping_t *next)
345 {
346 shared_region_mapping_lock(shared_region);
347
348 SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
349 shared_region));
350 assert(shared_region->ref_count > 0);
351 *text_region = shared_region->text_region;
352 *text_size = shared_region->text_size;
353 *data_region = shared_region->data_region;
354 *data_size = shared_region->data_size;
355 *region_mappings = shared_region->region_mappings;
356 *client_base = shared_region->client_base;
357 *alt_base = shared_region->alternate_base;
358 *alt_next = shared_region->alternate_next;
359 *flags = shared_region->flags;
360 *fs_base = shared_region->fs_base;
361 *system = shared_region->system;
362 *next = shared_region->next;
363
364 shared_region_mapping_unlock(shared_region);
return KERN_SUCCESS;
365 }
366
367 kern_return_t
368 shared_region_mapping_ref(
369 shared_region_mapping_t shared_region)
370 {
371 SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
372 "ref_count=%d + 1\n",
373 shared_region,
374 shared_region ? shared_region->ref_count : 0));
375 if(shared_region == NULL)
376 return KERN_SUCCESS;
377 assert(shared_region->ref_count > 0);
378 hw_atomic_add(&shared_region->ref_count, 1);
379 return KERN_SUCCESS;
380 }
381
382 static kern_return_t
383 shared_region_mapping_dealloc_lock(
384 shared_region_mapping_t shared_region,
385 int need_sfh_lock,
386 int need_drl_lock)
387 {
388 struct shared_region_task_mappings sm_info;
389 shared_region_mapping_t next = NULL;
390 int ref_count;
391
392 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
393 "(shared_region=%p,%d,%d) ref_count=%d\n",
394 shared_region, need_sfh_lock, need_drl_lock,
395 shared_region ? shared_region->ref_count : 0));
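/*
 * Each pass of this loop drops one reference.  When a region's count reaches
 * zero it is torn down and we continue with the next region on its object
 * chain (its "shadow"), so an entire chain can be released iteratively
 * without recursion.
 */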
396 while (shared_region) {
397 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
398 "ref_count=%d\n",
399 shared_region, shared_region->ref_count));
400 assert(shared_region->ref_count > 0);
401 if ((ref_count =
402 hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
403 shared_region_mapping_lock(shared_region);
404
405 sm_info.text_region = shared_region->text_region;
406 sm_info.text_size = shared_region->text_size;
407 sm_info.data_region = shared_region->data_region;
408 sm_info.data_size = shared_region->data_size;
409 sm_info.region_mappings = shared_region->region_mappings;
410 sm_info.client_base = shared_region->client_base;
411 sm_info.alternate_base = shared_region->alternate_base;
412 sm_info.alternate_next = shared_region->alternate_next;
413 sm_info.flags = shared_region->flags;
414 sm_info.self = (vm_offset_t)shared_region;
415
416 if(shared_region->region_mappings) {
417 lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
418 }
419 if(((vm_named_entry_t)
420 (shared_region->text_region->ip_kobject))
421 ->backing.map->pmap) {
422 pmap_remove(((vm_named_entry_t)
423 (shared_region->text_region->ip_kobject))
424 ->backing.map->pmap,
425 sm_info.client_base,
426 sm_info.client_base + sm_info.text_size);
427 }
428 ipc_port_release_send(shared_region->text_region);
429 if(shared_region->data_region)
430 ipc_port_release_send(shared_region->data_region);
431 if (shared_region->object_chain) {
432 next = shared_region->object_chain->object_chain_region;
433 kfree(shared_region->object_chain,
434 sizeof (struct shared_region_object_chain));
435 } else {
436 next = NULL;
437 }
438 shared_region_mapping_unlock(shared_region);
439 SHARED_REGION_DEBUG(
440 ("shared_region_mapping_dealloc_lock(%p): "
441 "freeing\n",
442 shared_region));
443 bzero((void *)shared_region,
444 sizeof (*shared_region)); /* FBDP debug */
445 kfree(shared_region,
446 sizeof (struct shared_region_mapping));
447 shared_region = next;
448 } else {
449 /* Stale indicates that a system region is no */
450 /* longer in the default environment list. */
451 if((ref_count == 1) &&
452 (shared_region->flags & SHARED_REGION_SYSTEM)
453 && !(shared_region->flags & SHARED_REGION_STALE)) {
454 SHARED_REGION_DEBUG(
455 ("shared_region_mapping_dealloc_lock"
456 "(%p): removing stale\n",
457 shared_region));
458 remove_default_shared_region_lock(shared_region,need_sfh_lock, need_drl_lock);
459 }
460 break;
461 }
462 }
463 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
464 shared_region));
465 return KERN_SUCCESS;
466 }
467
468 /*
469 * Stub function; always indicates that the lock needs to be taken in the
470 * call to lsf_remove_regions_mappings_lock().
471 */
472 kern_return_t
473 shared_region_mapping_dealloc(
474 shared_region_mapping_t shared_region)
475 {
476 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
477 "(shared_region=%p)\n",
478 shared_region));
479 if (shared_region) {
480 assert(shared_region->ref_count > 0);
481 }
482 return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
483 }
484
485 static
486 kern_return_t
487 shared_region_object_create(
488 vm_size_t size,
489 ipc_port_t *object_handle)
490 {
491 vm_named_entry_t user_entry;
492 ipc_port_t user_handle;
493
494 ipc_port_t previous;
495 vm_map_t new_map;
496
497 user_entry = (vm_named_entry_t)
498 kalloc(sizeof (struct vm_named_entry));
499 if(user_entry == NULL) {
500 return KERN_FAILURE;
501 }
502 named_entry_lock_init(user_entry);
503 user_handle = ipc_port_alloc_kernel();
504
505
506 ip_lock(user_handle);
507
508 /* make a sonce right */
509 user_handle->ip_sorights++;
510 ip_reference(user_handle);
511
512 user_handle->ip_destination = IP_NULL;
513 user_handle->ip_receiver_name = MACH_PORT_NULL;
514 user_handle->ip_receiver = ipc_space_kernel;
515
516 /* make a send right */
517 user_handle->ip_mscount++;
518 user_handle->ip_srights++;
519 ip_reference(user_handle);
520
521 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
522 /* nsrequest unlocks user_handle */
523
524 /* Create a named object based on a submap of specified size */
525
526 new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
527 user_entry->backing.map = new_map;
528 user_entry->internal = TRUE;
529 user_entry->is_sub_map = TRUE;
530 user_entry->is_pager = FALSE;
531 user_entry->offset = 0;
532 user_entry->protection = VM_PROT_ALL;
533 user_entry->size = size;
534 user_entry->ref_count = 1;
535
536 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
537 IKOT_NAMED_ENTRY);
538 *object_handle = user_handle;
539 return KERN_SUCCESS;
540 }
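/*
 * The handle created above is a kernel "named entry" whose backing store is a
 * freshly created, pmap-backed submap of the requested size.
 * shared_file_init() turns two of these into the global text and data
 * regions, and shared_com_boot_time_init() uses two more for the 32-bit and
 * 64-bit comm pages.
 */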
541
542 /* called for the non-default, private branch shared region support */
543 /* the system default fields for fs_base and system are not */
544 /* relevant, as the system default flag is not set */
545 kern_return_t
546 shared_file_create_system_region(
547 shared_region_mapping_t *shared_region)
548 {
549 ipc_port_t text_handle;
550 ipc_port_t data_handle;
551 long text_size;
552 long data_size;
553 vm_offset_t mapping_array;
554 kern_return_t kret;
555
556 SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));
557
558 text_size = 0x10000000;
559 data_size = 0x10000000;
560
561 kret = shared_file_init(&text_handle,
562 text_size, &data_handle, data_size, &mapping_array);
563 if(kret) {
564 SHARED_REGION_DEBUG(("shared_file_create_system_region: "
565 "shared_file_init failed kret=0x%x\n",
566 kret));
567 return kret;
568 }
569 kret = shared_region_mapping_create(text_handle,
570 text_size, data_handle, data_size, mapping_array,
571 GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
572 SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
573 if(kret) {
574 SHARED_REGION_DEBUG(("shared_file_create_system_region: "
575 "shared_region_mapping_create failed "
576 "kret=0x%x\n",
577 kret));
578 return kret;
579 }
580 (*shared_region)->flags = 0;
581 if(com_mapping_resource) {
582 shared_region_mapping_ref(com_mapping_resource);
583 (*shared_region)->next = com_mapping_resource;
584 }
585
586 SHARED_REGION_DEBUG(("shared_file_create_system_region() "
587 "-> shared_region=%p\n",
588 *shared_region));
589 return KERN_SUCCESS;
590 }
591
592 /*
593 * load a new default for a specified environment into the default shared
594 * regions list. If a previous default exists for the environment specification,
595 * it is returned along with its reference. It is expected that the new
596 * system region structure passes a reference.
597 */
598
599 shared_region_mapping_t
600 update_default_shared_region(
601 shared_region_mapping_t new_system_region)
602 {
603 shared_region_mapping_t old_system_region;
604 unsigned int fs_base;
605 unsigned int system;
606
607 SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
608 new_system_region));
609 assert(new_system_region->ref_count > 0);
610 fs_base = new_system_region->fs_base;
611 system = new_system_region->system;
612 new_system_region->flags |= SHARED_REGION_SYSTEM;
613 default_regions_list_lock();
614 old_system_region = default_environment_shared_regions;
615
616 if((old_system_region != NULL) &&
617 (old_system_region->fs_base == fs_base) &&
618 (old_system_region->system == system)) {
619 new_system_region->default_env_list =
620 old_system_region->default_env_list;
621 old_system_region->default_env_list = NULL;
622 default_environment_shared_regions = new_system_region;
623 old_system_region->flags |= SHARED_REGION_STALE;
624 default_regions_list_unlock();
625 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
626 "old=%p stale 1\n",
627 new_system_region, old_system_region));
628 assert(old_system_region->ref_count > 0);
629 return old_system_region;
630 }
631 if (old_system_region) {
632 while(old_system_region->default_env_list != NULL) {
633 if((old_system_region->default_env_list->fs_base == fs_base) &&
634 (old_system_region->default_env_list->system == system)) {
635 shared_region_mapping_t tmp_system_region;
636
637 tmp_system_region =
638 old_system_region->default_env_list;
639 new_system_region->default_env_list =
640 tmp_system_region->default_env_list;
641 tmp_system_region->default_env_list = NULL;
642 old_system_region->default_env_list =
643 new_system_region;
644 old_system_region = tmp_system_region;
645 old_system_region->flags |= SHARED_REGION_STALE;
646 default_regions_list_unlock();
647 SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
648 ": old=%p stale 2\n",
649 new_system_region,
650 old_system_region));
651 assert(old_system_region->ref_count > 0);
652 return old_system_region;
653 }
654 old_system_region = old_system_region->default_env_list;
655 }
656 }
657 /* If we get here, we are at the end of the system list and we */
658 /* did not find a pre-existing entry */
659 if(old_system_region) {
660 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
661 "adding after old=%p\n",
662 new_system_region, old_system_region));
663 assert(old_system_region->ref_count > 0);
664 old_system_region->default_env_list = new_system_region;
665 } else {
666 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
667 "new default\n",
668 new_system_region));
669 default_environment_shared_regions = new_system_region;
670 }
671 assert(new_system_region->ref_count > 0);
672 default_regions_list_unlock();
673 return NULL;
674 }
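/*
 * A worked example with hypothetical regions: given a default list
 * A(fs_base=1, system=X) -> B(fs_base=2, system=X), installing a new region C
 * with (fs_base=2, system=X) leaves the list as A -> C, and B is returned,
 * marked SHARED_REGION_STALE, carrying the reference the caller is expected
 * to drop.  If no environment matches, C is simply appended (or becomes the
 * head of an empty list) and NULL is returned.
 */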
675
676 /*
677 * lookup a system_shared_region for the environment specified. If one is
678 * found, it is returned along with a reference against the structure
679 */
680
681 shared_region_mapping_t
682 lookup_default_shared_region(
683 unsigned int fs_base,
684 unsigned int system)
685 {
686 shared_region_mapping_t system_region;
687 default_regions_list_lock();
688 system_region = default_environment_shared_regions;
689
690 SHARED_REGION_DEBUG(("lookup_default_shared_region"
691 "(base=0x%x, system=0x%x)\n",
692 fs_base, system));
693 while(system_region != NULL) {
694 SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
695 ": system_region=%p base=0x%x system=0x%x"
696 " ref_count=%d\n",
697 fs_base, system, system_region,
698 system_region->fs_base,
699 system_region->system,
700 system_region->ref_count));
701 assert(system_region->ref_count > 0);
702 if((system_region->fs_base == fs_base) &&
703 (system_region->system == system)) {
704 break;
705 }
706 system_region = system_region->default_env_list;
707 }
708 if(system_region)
709 shared_region_mapping_ref(system_region);
710 default_regions_list_unlock();
711 SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
712 fs_base, system, system_region));
713 return system_region;
714 }
715
716 /*
717 * remove a system_region default if it appears in the default regions list.
718 * Drop a reference on removal.
719 */
720
721 __private_extern__ void
722 remove_default_shared_region_lock(
723 shared_region_mapping_t system_region,
724 int need_sfh_lock,
725 int need_drl_lock)
726 {
727 shared_region_mapping_t old_system_region;
728
729 SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
730 "(system_region=%p, %d, %d)\n",
731 system_region, need_sfh_lock, need_drl_lock));
732 if (need_drl_lock) {
733 default_regions_list_lock();
734 }
735 old_system_region = default_environment_shared_regions;
736
737 if(old_system_region == NULL) {
738 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
739 "-> default_env=NULL\n",
740 system_region));
741 if (need_drl_lock) {
742 default_regions_list_unlock();
743 }
744 return;
745 }
746
747 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
748 "default_env=%p\n",
749 system_region, old_system_region));
750 assert(old_system_region->ref_count > 0);
751 if (old_system_region == system_region) {
752 default_environment_shared_regions
753 = old_system_region->default_env_list;
754 old_system_region->default_env_list = NULL;
755 old_system_region->flags |= SHARED_REGION_STALE;
756 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
757 "old=%p ref_count=%d STALE\n",
758 system_region, old_system_region,
759 old_system_region->ref_count));
760 shared_region_mapping_dealloc_lock(old_system_region,
761 need_sfh_lock,
762 0);
763 if (need_drl_lock) {
764 default_regions_list_unlock();
765 }
766 return;
767 }
768
769 while(old_system_region->default_env_list != NULL) {
770 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
771 "old=%p->default_env=%p\n",
772 system_region, old_system_region,
773 old_system_region->default_env_list));
774 assert(old_system_region->default_env_list->ref_count > 0);
775 if(old_system_region->default_env_list == system_region) {
776 shared_region_mapping_t dead_region;
777 dead_region = old_system_region->default_env_list;
778 old_system_region->default_env_list =
779 dead_region->default_env_list;
780 dead_region->default_env_list = NULL;
781 dead_region->flags |= SHARED_REGION_STALE;
782 SHARED_REGION_DEBUG(
783 ("remove_default_shared_region_lock(%p): "
784 "dead=%p ref_count=%d stale\n",
785 system_region, dead_region,
786 dead_region->ref_count));
787 shared_region_mapping_dealloc_lock(dead_region,
788 need_sfh_lock,
789 0);
790 if (need_drl_lock) {
791 default_regions_list_unlock();
792 }
793 return;
794 }
795 old_system_region = old_system_region->default_env_list;
796 }
797 if (need_drl_lock) {
798 default_regions_list_unlock();
799 }
800 }
801
802 /*
803 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
804 * the only caller. Remove this stub function and the corresponding symbol
805 * export for Merlot.
806 */
807 void
808 remove_default_shared_region(
809 shared_region_mapping_t system_region)
810 {
811 SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
812 system_region));
813 if (system_region) {
814 assert(system_region->ref_count > 0);
815 }
816 remove_default_shared_region_lock(system_region, 1, 1);
817 }
818
819 void
820 remove_all_shared_regions(void)
821 {
822 shared_region_mapping_t system_region;
823 shared_region_mapping_t next_system_region;
824
825 SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
826 LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
827 LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
828 default_regions_list_lock();
829 system_region = default_environment_shared_regions;
830
831 if(system_region == NULL) {
832 default_regions_list_unlock();
833 return;
834 }
835
836 while(system_region != NULL) {
837 next_system_region = system_region->default_env_list;
838 system_region->default_env_list = NULL;
839 system_region->flags |= SHARED_REGION_STALE;
840 SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
841 "%p ref_count=%d stale\n",
842 system_region, system_region->ref_count));
843 assert(system_region->ref_count > 0);
844 shared_region_mapping_dealloc_lock(system_region, 1, 0);
845 system_region = next_system_region;
846 }
847 default_environment_shared_regions = NULL;
848 default_regions_list_unlock();
849 SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
850 LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
851 LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
852 }
853
854 /* shared_com_boot_time_init initializes the common page shared data and */
855 /* text region. This region is semi-independent of the split libs */
856 /* and so its policies have to be handled differently by the code that */
857 /* manipulates the mapping of shared region environments. However, */
858 /* the shared region delivery system supports both kinds of region. */
859 void shared_com_boot_time_init(void); /* forward */
860 void
861 shared_com_boot_time_init(void)
862 {
863 kern_return_t kret;
864 vm_named_entry_t named_entry;
865
866 SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
867 if(com_region_handle32) {
868 panic("shared_com_boot_time_init: "
869 "com_region_handle32 already set\n");
870 }
871 if(com_region_handle64) {
872 panic("shared_com_boot_time_init: "
873 "com_region_handle64 already set\n");
874 }
875
876 /* create com page regions, 1 each for 32 and 64-bit code */
877 if((kret = shared_region_object_create(
878 com_region_size,
879 &com_region_handle32))) {
880 panic("shared_com_boot_time_init: "
881 "unable to create 32-bit comm page\n");
882 return;
883 }
884 if((kret = shared_region_object_create(
885 com_region_size,
886 &com_region_handle64))) {
887 panic("shared_com_boot_time_init: "
888 "unable to create 64-bit comm page\n");
889 return;
890 }
891
892 /* now export the underlying region/map */
893 named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
894 com_region_map32 = named_entry->backing.map;
895 named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
896 com_region_map64 = named_entry->backing.map;
897
898 /* wrap the com region in its own shared file mapping structure */
899 /* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
900 kret = shared_region_mapping_create(com_region_handle32,
901 com_region_size, NULL, 0, 0,
902 _COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
903 0, 0);
904 if (kret) {
905 panic("shared_region_mapping_create failed for commpage");
906 }
907 }
908
909 void
910 shared_file_boot_time_init(
911 unsigned int fs_base,
912 unsigned int system)
913 {
914 long text_region_size;
915 long data_region_size;
916 shared_region_mapping_t new_system_region;
917 shared_region_mapping_t old_default_env;
918
919 SHARED_REGION_DEBUG(("shared_file_boot_time_init"
920 "(base=0x%x,system=0x%x)\n",
921 fs_base, system));
922 text_region_size = 0x10000000;
923 data_region_size = 0x10000000;
924 shared_file_init(&shared_text_region_handle,
925 text_region_size,
926 &shared_data_region_handle,
927 data_region_size,
928 &shared_file_mapping_array);
929
930 shared_region_mapping_create(shared_text_region_handle,
931 text_region_size,
932 shared_data_region_handle,
933 data_region_size,
934 shared_file_mapping_array,
935 GLOBAL_SHARED_TEXT_SEGMENT,
936 &new_system_region,
937 SHARED_ALTERNATE_LOAD_BASE,
938 SHARED_ALTERNATE_LOAD_BASE);
939
940 new_system_region->fs_base = fs_base;
941 new_system_region->system = system;
942 new_system_region->flags = SHARED_REGION_SYSTEM;
943
944 /* grab an extra reference for the caller */
945 /* remember to grab before call to update */
946 shared_region_mapping_ref(new_system_region);
947 old_default_env = update_default_shared_region(new_system_region);
948 /* hold an extra reference because these are the system */
949 /* shared regions. */
950 if(old_default_env)
951 shared_region_mapping_dealloc(old_default_env);
952 if(com_mapping_resource == NULL) {
953 shared_com_boot_time_init();
954 }
955 shared_region_mapping_ref(com_mapping_resource);
956 new_system_region->next = com_mapping_resource;
957 vm_set_shared_region(current_task(), new_system_region);
958 SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
959 fs_base, system));
960 }
961
962
963 /* called at boot time, allocates two regions, each 256 megs in size */
964 /* these regions are later mapped into task spaces, allowing them to */
965 /* share the contents of the regions. shared_file_init is part of */
966 /* a shared_memory_server which not only allocates the backing maps */
967 /* but also coordinates requests for space. */
968
969
970 static kern_return_t
971 shared_file_init(
972 ipc_port_t *text_region_handle,
973 vm_size_t text_region_size,
974 ipc_port_t *data_region_handle,
975 vm_size_t data_region_size,
976 vm_offset_t *file_mapping_array)
977 {
978 shared_file_info_t *sf_head;
979 vm_offset_t table_mapping_address;
980 int data_table_size;
981 int hash_size;
982 kern_return_t kret;
983
984 vm_object_t buf_object;
985 vm_map_entry_t entry;
986 vm_size_t alloced;
987 vm_offset_t b;
988 vm_page_t p;
989
990 SHARED_REGION_DEBUG(("shared_file_init()\n"));
991 /* create text and data maps/regions */
992 kret = shared_region_object_create(
993 text_region_size,
994 text_region_handle);
995 if (kret) {
996 return kret;
997 }
998 kret = shared_region_object_create(
999 data_region_size,
1000 data_region_handle);
1001 if (kret) {
1002 ipc_port_release_send(*text_region_handle);
1003 return kret;
1004 }
1005
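/*
 * Worked example for the default 256 MB (0x10000000) regions set up by
 * shared_file_create_system_region() and shared_file_boot_time_init():
 *
 * data_table_size       = 0x10000000 >> 9  = 0x80000 (512 KB)
 * hash_size             = 0x10000000 >> 14 = 0x4000  (16 KB)
 * table_mapping_address = 0x10000000 - 0x80000 = 0x0ff80000
 *
 * i.e. the mapping table occupies the last 512 KB of the data region; its
 * start holds the shared_file_info header plus roughly 16 KB of hash
 * buckets, and the remainder feeds lsf_zone below.
 */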
1006 data_table_size = data_region_size >> 9;
1007 hash_size = data_region_size >> 14;
1008 table_mapping_address = data_region_size - data_table_size;
1009
1010 if(shared_file_mapping_array == 0) {
1011 vm_map_address_t map_addr;
1012 buf_object = vm_object_allocate(data_table_size);
1013
1014 if(vm_map_find_space(kernel_map, &map_addr,
1015 data_table_size, 0, &entry)
1016 != KERN_SUCCESS) {
1017 panic("shared_file_init: no space");
1018 }
1019 shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
1020 *file_mapping_array = shared_file_mapping_array;
1021 vm_map_unlock(kernel_map);
1022 entry->object.vm_object = buf_object;
1023 entry->offset = 0;
1024
1025 for (b = *file_mapping_array, alloced = 0;
1026 alloced < (hash_size +
1027 round_page(sizeof(struct sf_mapping)));
1028 alloced += PAGE_SIZE, b += PAGE_SIZE) {
1029 vm_object_lock(buf_object);
1030 p = vm_page_alloc(buf_object, alloced);
1031 if (p == VM_PAGE_NULL) {
1032 panic("shared_file_init: no space");
1033 }
1034 p->busy = FALSE;
1035 vm_object_unlock(buf_object);
1036 pmap_enter(kernel_pmap, b, p->phys_page,
1037 VM_PROT_READ | VM_PROT_WRITE,
1038 ((unsigned int)(p->object->wimg_bits))
1039 & VM_WIMG_MASK,
1040 TRUE);
1041 }
1042
1043
1044 /* initialize loaded file array */
1045 sf_head = (shared_file_info_t *)*file_mapping_array;
1046 sf_head->hash = (queue_head_t *)
1047 (((int)*file_mapping_array) +
1048 sizeof(struct shared_file_info));
1049 sf_head->hash_size = hash_size/sizeof(queue_head_t);
1050 mutex_init(&(sf_head->lock), 0);
1051 sf_head->hash_init = FALSE;
1052
1053
1054 mach_make_memory_entry(kernel_map, &data_table_size,
1055 *file_mapping_array, VM_PROT_READ, &sfma_handle,
1056 NULL);
1057
1058 if (vm_map_wire(kernel_map,
1059 vm_map_trunc_page(*file_mapping_array),
1060 vm_map_round_page(*file_mapping_array +
1061 hash_size +
1062 round_page(sizeof(struct sf_mapping))),
1063 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1064 panic("shared_file_init: No memory for data table");
1065 }
1066
1067 lsf_zone = zinit(sizeof(struct load_file_ele),
1068 data_table_size -
1069 (hash_size + round_page_32(sizeof(struct sf_mapping))),
1070 0, "load_file_server");
1071
1072 zone_change(lsf_zone, Z_EXHAUST, TRUE);
1073 zone_change(lsf_zone, Z_COLLECT, FALSE);
1074 zone_change(lsf_zone, Z_EXPAND, FALSE);
1075 zone_change(lsf_zone, Z_FOREIGN, TRUE);
1076
1077 /* initialize the global default environment lock */
1078 mutex_init(&default_regions_list_lock_data, 0);
1079
1080 } else {
1081 *file_mapping_array = shared_file_mapping_array;
1082 }
1083
1084 kret = vm_map(((vm_named_entry_t)
1085 (*data_region_handle)->ip_kobject)->backing.map,
1086 &table_mapping_address,
1087 data_table_size, 0,
1088 SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
1089 sfma_handle, 0, FALSE,
1090 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
1091
1092 SHARED_REGION_DEBUG(("shared_file_init() done\n"));
1093 return kret;
1094 }
1095
1096 static kern_return_t
1097 shared_file_header_init(
1098 shared_file_info_t *shared_file_header)
1099 {
1100 vm_size_t hash_table_size;
1101 vm_size_t hash_table_offset;
1102 int i;
1103 /* Wire the hash entry pool only as needed; since we are the only */
1104 /* users, we take a few liberties with the population of our */
1105 /* zone. */
1106 static int allocable_hash_pages;
1107 static vm_offset_t hash_cram_address;
1108
1109
1110 hash_table_size = shared_file_header->hash_size
1111 * sizeof (struct queue_entry);
1112 hash_table_offset = hash_table_size +
1113 round_page(sizeof (struct sf_mapping));
1114 for (i = 0; i < shared_file_header->hash_size; i++)
1115 queue_init(&shared_file_header->hash[i]);
1116
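/*
 * "hash_table_size << 5" effectively reconstructs the size of the whole
 * mapping table: shared_file_init() sized that table at (region size >> 9)
 * and the hash area at (region size >> 14), a 32:1 ratio.  Whatever is left
 * after the hash buckets and the sf_mapping header is the page budget that
 * can be wired and crammed into lsf_zone as elements are needed.
 */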
1117 allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
1118 / PAGE_SIZE);
1119 hash_cram_address = ((vm_offset_t) shared_file_header)
1120 + hash_table_offset;
1121 shared_file_available_hash_ele = 0;
1122
1123 shared_file_header->hash_init = TRUE;
1124
1125 if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
1126 int cram_pages, cram_size;
1127
1128 cram_pages = allocable_hash_pages > 3 ?
1129 3 : allocable_hash_pages;
1130 cram_size = cram_pages * PAGE_SIZE;
1131 if (vm_map_wire(kernel_map, hash_cram_address,
1132 hash_cram_address + cram_size,
1133 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1134 printf("shared_file_header_init: "
1135 "No memory for data table\n");
1136 return KERN_NO_SPACE;
1137 }
1138 allocable_hash_pages -= cram_pages;
1139 zcram(lsf_zone, (void *) hash_cram_address, cram_size);
1140 shared_file_available_hash_ele
1141 += cram_size/sizeof(struct load_file_ele);
1142 hash_cram_address += cram_size;
1143 }
1144
1145 return KERN_SUCCESS;
1146 }
1147
1148
1149 /*
1150 * map_shared_file:
1151 *
1152 * Attempt to map a split library into the shared region. Check if the mappings
1153 * are already in place.
1154 */
1155 kern_return_t
1156 map_shared_file(
1157 int map_cnt,
1158 struct shared_file_mapping_np *mappings,
1159 memory_object_control_t file_control,
1160 memory_object_size_t file_size,
1161 shared_region_task_mappings_t sm_info,
1162 mach_vm_offset_t base_offset,
1163 mach_vm_offset_t *slide_p)
1164 {
1165 vm_object_t file_object;
1166 shared_file_info_t *shared_file_header;
1167 load_struct_t *file_entry;
1168 loaded_mapping_t *file_mapping;
1169 int i;
1170 kern_return_t ret;
1171 mach_vm_offset_t slide;
1172
1173 SHARED_REGION_DEBUG(("map_shared_file()\n"));
1174
1175 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1176
1177 mutex_lock(&shared_file_header->lock);
1178
1179 /* If this is the first call to this routine, take the opportunity */
1180 /* to initialize the hash table which will be used to look up */
1181 /* mappings based on the file object */
1182
1183 if(shared_file_header->hash_init == FALSE) {
1184 ret = shared_file_header_init(shared_file_header);
1185 if (ret != KERN_SUCCESS) {
1186 mutex_unlock(&shared_file_header->lock);
1187 return KERN_NO_SPACE;
1188 }
1189 }
1190
1191
1192 /* Find the entry in the map associated with the current mapping */
1193 /* of the file object */
1194 file_object = memory_object_control_to_vm_object(file_control);
1195
1196 file_entry = lsf_hash_lookup(shared_file_header->hash,
1197 (void *) file_object,
1198 mappings[0].sfm_file_offset,
1199 shared_file_header->hash_size,
1200 TRUE, TRUE, sm_info);
1201 if (file_entry) {
1202 /* File is loaded, check the load manifest for exact match */
1203 /* we simplify by requiring that the elements be the same */
1204 /* size and in the same order rather than checking for */
1205 /* semantic equivalence. */
1206
1207 i = 0;
1208 file_mapping = file_entry->mappings;
1209 while(file_mapping != NULL) {
1210 if(i>=map_cnt) {
1211 mutex_unlock(&shared_file_header->lock);
1212 return KERN_INVALID_ARGUMENT;
1213 }
1214 if(((mappings[i].sfm_address)
1215 & SHARED_DATA_REGION_MASK) !=
1216 file_mapping->mapping_offset ||
1217 mappings[i].sfm_size != file_mapping->size ||
1218 mappings[i].sfm_file_offset != file_mapping->file_offset ||
1219 mappings[i].sfm_init_prot != file_mapping->protection) {
1220 break;
1221 }
1222 file_mapping = file_mapping->next;
1223 i++;
1224 }
1225 if(i!=map_cnt) {
1226 mutex_unlock(&shared_file_header->lock);
1227 return KERN_INVALID_ARGUMENT;
1228 }
1229
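/*
 * The file is already mapped; "slide" is how far its existing base address
 * is from the address this caller asked for.  A slide of zero means it is
 * already mapped exactly where it was requested.
 */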
1230 slide = file_entry->base_address - base_offset;
1231 if (slide_p != NULL) {
1232 /*
1233 * File already mapped, possibly at a different address,
1234 * and the caller can accept the sliding.
1235 */
1236 *slide_p = slide;
1237 ret = KERN_SUCCESS;
1238 } else {
1239 /*
1240 * The caller doesn't want any sliding. The file needs
1241 * to be mapped at the requested address or not mapped.
1242 */
1243 if (slide != 0) {
1244 /*
1245 * The file is already mapped but at a different
1246 * address.
1247 * We fail.
1248 * XXX should we attempt to load at
1249 * requested address too ?
1250 */
1251 ret = KERN_FAILURE;
1252 } else {
1253 /*
1254 * The file is already mapped at the correct
1255 * address.
1256 * We're done !
1257 */
1258 ret = KERN_SUCCESS;
1259 }
1260 }
1261 mutex_unlock(&shared_file_header->lock);
1262 return ret;
1263 } else {
1264 /* File is not loaded, let's attempt to load it */
1265 ret = lsf_map(mappings, map_cnt,
1266 (void *)file_control,
1267 file_size,
1268 sm_info,
1269 base_offset,
1270 slide_p);
1271 if(ret == KERN_NO_SPACE) {
1272 shared_region_mapping_t regions;
1273 shared_region_mapping_t system_region;
1274 regions = (shared_region_mapping_t)sm_info->self;
1275 regions->flags |= SHARED_REGION_FULL;
1276 system_region = lookup_default_shared_region(
1277 regions->fs_base, regions->system);
1278 if (system_region == regions) {
1279 shared_region_mapping_t new_system_shared_region;
1280 shared_file_boot_time_init(
1281 regions->fs_base, regions->system);
1282 /* current task must stay with its current */
1283 /* regions, drop count on system_shared_region */
1284 /* and put back our original set */
1285 vm_get_shared_region(current_task(),
1286 &new_system_shared_region);
1287 shared_region_mapping_dealloc_lock(
1288 new_system_shared_region, 0, 1);
1289 vm_set_shared_region(current_task(), regions);
1290 } else if (system_region != NULL) {
1291 shared_region_mapping_dealloc_lock(
1292 system_region, 0, 1);
1293 }
1294 }
1295 mutex_unlock(&shared_file_header->lock);
1296 return ret;
1297 }
1298 }
1299
1300 /*
1301 * shared_region_cleanup:
1302 *
1303 * Deallocates all the mappings in the shared region, except those explicitly
1304 * specified in the "ranges" set of address ranges.
1305 */
1306 kern_return_t
1307 shared_region_cleanup(
1308 unsigned int range_count,
1309 struct shared_region_range_np *ranges,
1310 shared_region_task_mappings_t sm_info)
1311 {
1312 kern_return_t kr;
1313 ipc_port_t region_handle;
1314 vm_named_entry_t region_named_entry;
1315 vm_map_t text_submap, data_submap, submap, next_submap;
1316 unsigned int i_range;
1317 vm_map_offset_t range_start, range_end;
1318 vm_map_offset_t submap_base, submap_end, submap_offset;
1319 vm_map_size_t delete_size;
1320
1321 struct shared_region_range_np tmp_range;
1322 unsigned int sort_index, sorted_index;
1323 vm_map_offset_t sort_min_address;
1324 unsigned int sort_min_index;
1325
1326 /*
1327 * Since we want to deallocate the holes between the "ranges",
1328 * sort the array by increasing addresses.
1329 */
1330 for (sorted_index = 0;
1331 sorted_index < range_count;
1332 sorted_index++) {
1333
1334 /* first remaining entry is our new starting point */
1335 sort_min_index = sorted_index;
1336 sort_min_address = ranges[sort_min_index].srr_address;
1337
1338 /* find the lowest mapping_offset in the remaining entries */
1339 for (sort_index = sorted_index + 1;
1340 sort_index < range_count;
1341 sort_index++) {
1342 if (ranges[sort_index].srr_address < sort_min_address) {
1343 /* lowest address so far... */
1344 sort_min_index = sort_index;
1345 sort_min_address =
1346 ranges[sort_min_index].srr_address;
1347 }
1348 }
1349
1350 if (sort_min_index != sorted_index) {
1351 /* swap entries */
1352 tmp_range = ranges[sort_min_index];
1353 ranges[sort_min_index] = ranges[sorted_index];
1354 ranges[sorted_index] = tmp_range;
1355 }
1356 }
1357
1358 region_handle = (ipc_port_t) sm_info->text_region;
1359 region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
1360 text_submap = region_named_entry->backing.map;
1361
1362 region_handle = (ipc_port_t) sm_info->data_region;
1363 region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
1364 data_submap = region_named_entry->backing.map;
1365
1366 submap = text_submap;
1367 next_submap = submap;
1368 submap_base = sm_info->client_base;
1369 submap_offset = 0;
1370 submap_end = submap_base + sm_info->text_size;
1371 for (i_range = 0;
1372 i_range < range_count;
1373 i_range++) {
1374
1375 /* get the next range of addresses to keep */
1376 range_start = ranges[i_range].srr_address;
1377 range_end = range_start + ranges[i_range].srr_size;
1378 /* align them to page boundaries */
1379 range_start = vm_map_trunc_page(range_start);
1380 range_end = vm_map_round_page(range_end);
1381
1382 /* make sure we don't go beyond the submap's boundaries */
1383 if (range_start < submap_base) {
1384 range_start = submap_base;
1385 } else if (range_start >= submap_end) {
1386 range_start = submap_end;
1387 }
1388 if (range_end < submap_base) {
1389 range_end = submap_base;
1390 } else if (range_end >= submap_end) {
1391 range_end = submap_end;
1392 }
1393
1394 if (range_start > submap_base + submap_offset) {
1395 /*
1396 * Deallocate everything between the last offset in the
1397 * submap and the start of this range.
1398 */
1399 delete_size = range_start -
1400 (submap_base + submap_offset);
1401 (void) vm_deallocate(submap,
1402 submap_offset,
1403 delete_size);
1404 } else {
1405 delete_size = 0;
1406 }
1407
1408 /* skip to the end of the range */
1409 submap_offset += delete_size + (range_end - range_start);
1410
1411 if (submap_base + submap_offset >= submap_end) {
1412 /* get to next submap */
1413
1414 if (submap == data_submap) {
1415 /* no other submap after data: done ! */
1416 break;
1417 }
1418
1419 /* get original range again */
1420 range_start = ranges[i_range].srr_address;
1421 range_end = range_start + ranges[i_range].srr_size;
1422 range_start = vm_map_trunc_page(range_start);
1423 range_end = vm_map_round_page(range_end);
1424
1425 if (range_end > submap_end) {
1426 /*
1427 * This last range overlaps with the next
1428 * submap. We need to process it again
1429 * after switching submaps. Otherwise, we'll
1430 * just continue with the next range.
1431 */
1432 i_range--;
1433 }
1434
1435 if (submap == text_submap) {
1436 /*
1437 * Switch to the data submap.
1438 */
1439 submap = data_submap;
1440 submap_offset = 0;
1441 submap_base = sm_info->client_base +
1442 sm_info->text_size;
1443 submap_end = submap_base + sm_info->data_size;
1444 }
1445 }
1446 }
1447
1448 if (submap_base + submap_offset < submap_end) {
1449 /* delete remainder of this submap, from "offset" to the end */
1450 (void) vm_deallocate(submap,
1451 submap_offset,
1452 submap_end - submap_base - submap_offset);
1453 /* if nothing to keep in data submap, delete it all */
1454 if (submap == text_submap) {
1455 submap = data_submap;
1456 submap_offset = 0;
1457 submap_base = sm_info->client_base + sm_info->text_size;
1458 submap_end = submap_base + sm_info->data_size;
1459 (void) vm_deallocate(data_submap,
1460 0,
1461 submap_end - submap_base);
1462 }
1463 }
1464
1465 kr = KERN_SUCCESS;
1466 return kr;
1467 }
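/*
 * A sketch of the effect, with hypothetical numbers: if the text submap
 * covers [client_base, client_base + text_size) and the (sorted) ranges to
 * keep are [client_base + 0x1000, client_base + 0x3000) and
 * [client_base + 0x8000, client_base + 0x9000), the loop above deallocates
 * the submap-relative holes [0, 0x1000), [0x3000, 0x8000) and
 * [0x9000, text_size), and the data submap is then wiped entirely because
 * nothing in it was asked to be kept.
 */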
1468
1469 /* A hash lookup function for the list of loaded files in */
1470 /* shared_memory_server space. */
1471
1472 static load_struct_t *
1473 lsf_hash_lookup(
1474 queue_head_t *hash_table,
1475 void *file_object,
1476 vm_offset_t recognizableOffset,
1477 int size,
1478 boolean_t regular,
1479 boolean_t alternate,
1480 shared_region_task_mappings_t sm_info)
1481 {
1482 register queue_t bucket;
1483 load_struct_t *entry;
1484 shared_region_mapping_t target_region;
1485 int depth;
1486
1487 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1488 "reg=%d alt=%d sm_info=%p\n",
1489 hash_table, file_object, recognizableOffset, size,
1490 regular, alternate, sm_info));
1491
1492 bucket = &(hash_table[load_file_hash((int)file_object, size)]);
1493 for (entry = (load_struct_t *)queue_first(bucket);
1494 !queue_end(bucket, &entry->links);
1495 entry = (load_struct_t *)queue_next(&entry->links)) {
1496
1497 if ((entry->file_object == (int)file_object) &&
1498 (entry->file_offset == recognizableOffset)) {
1499 target_region = (shared_region_mapping_t)sm_info->self;
1500 depth = target_region->depth;
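/*
 * Walk this region and then the regions it shadows via its object chain.
 * An entry only counts if it was loaded into the region instance being
 * examined, at a depth that instance can still see; "alternate" hits live
 * at or above alternate_base, "regular" hits below it.
 */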
1501 while(target_region) {
1502 if((!(sm_info->self)) ||
1503 ((target_region == entry->regions_instance) &&
1504 (target_region->depth >= entry->depth))) {
1505 if(alternate &&
1506 entry->base_address >= sm_info->alternate_base) {
1507 LSF_DEBUG(("lsf_hash_lookup: "
1508 "alt=%d found entry %p "
1509 "(base=0x%x "
1510 "alt_base=0x%x)\n",
1511 alternate, entry,
1512 entry->base_address,
1513 sm_info->alternate_base));
1514 return entry;
1515 }
1516 if (regular &&
1517 entry->base_address < sm_info->alternate_base) {
1518 LSF_DEBUG(("lsf_hash_lookup: "
1519 "reg=%d found entry %p "
1520 "(base=0x%x "
1521 "alt_base=0x%x)\n",
1522 regular, entry,
1523 entry->base_address,
1524 sm_info->alternate_base));
1525 return entry;
1526 }
1527 }
1528 if(target_region->object_chain) {
1529 target_region = (shared_region_mapping_t)
1530 target_region->object_chain->object_chain_region;
1531 depth = target_region->object_chain->depth;
1532 } else {
1533 target_region = NULL;
1534 }
1535 }
1536 }
1537 }
1538
1539 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1540 "reg=%d alt=%d sm_info=%p NOT FOUND\n",
1541 hash_table, file_object, recognizableOffset, size,
1542 regular, alternate, sm_info));
1543 return (load_struct_t *)0;
1544 }
1545
1546 __private_extern__ load_struct_t *
1547 lsf_remove_regions_mappings_lock(
1548 shared_region_mapping_t region,
1549 shared_region_task_mappings_t sm_info,
1550 int need_sfh_lock)
1551 {
1552 int i;
1553 register queue_t bucket;
1554 shared_file_info_t *shared_file_header;
1555 load_struct_t *entry;
1556 load_struct_t *next_entry;
1557
1558 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1559
1560 LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
1561 "sfh=%p\n",
1562 region, sm_info, shared_file_header));
1563 if (need_sfh_lock)
1564 mutex_lock(&shared_file_header->lock);
1565 if(shared_file_header->hash_init == FALSE) {
1566 if (need_sfh_lock)
1567 mutex_unlock(&shared_file_header->lock);
1568 LSF_DEBUG(("lsf_remove_regions_mappings_lock"
1569 "(region=%p,sm_info=%p): not inited\n",
1570 region, sm_info));
1571 return NULL;
1572 }
1573 for(i = 0; i<shared_file_header->hash_size; i++) {
1574 bucket = &shared_file_header->hash[i];
1575 for (entry = (load_struct_t *)queue_first(bucket);
1576 !queue_end(bucket, &entry->links);) {
1577 next_entry = (load_struct_t *)queue_next(&entry->links);
1578 if(region == entry->regions_instance) {
1579 LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
1580 "entry %p region %p: "
1581 "unloading\n",
1582 entry, region));
1583 lsf_unload((void *)entry->file_object,
1584 entry->base_address, sm_info);
1585 } else {
1586 LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
1587 "entry %p region %p target region %p: "
1588 "not unloading\n",
1589 entry, entry->regions_instance, region));
1590 }
1591
1592 entry = next_entry;
1593 }
1594 }
1595 if (need_sfh_lock)
1596 mutex_unlock(&shared_file_header->lock);
1597 LSF_DEBUG(("lsf_remove_regions_mappings_lock done\n"));
1598
1599 return NULL; /* XXX */
1600 }
1601
1602 /*
1603 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
1604 * only caller. Remove this stub function and the corresponding symbol
1605 * export for Merlot.
1606 */
1607 load_struct_t *
1608 lsf_remove_regions_mappings(
1609 shared_region_mapping_t region,
1610 shared_region_task_mappings_t sm_info)
1611 {
1612 return lsf_remove_regions_mappings_lock(region, sm_info, 1);
1613 }
1614
1615 /* Removes a map_list, (list of loaded extents) for a file from */
1616 /* the loaded file hash table. */
1617
1618 static load_struct_t *
1619 lsf_hash_delete(
1620 void *file_object,
1621 vm_offset_t base_offset,
1622 shared_region_task_mappings_t sm_info)
1623 {
1624 register queue_t bucket;
1625 shared_file_info_t *shared_file_header;
1626 load_struct_t *entry;
1627
1628 LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
1629 file_object, base_offset, sm_info));
1630
1631 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1632
1633 bucket = &shared_file_header->hash
1634 [load_file_hash((int)file_object, shared_file_header->hash_size)];
1635
1636 for (entry = (load_struct_t *)queue_first(bucket);
1637 !queue_end(bucket, &entry->links);
1638 entry = (load_struct_t *)queue_next(&entry->links)) {
1639 if((!(sm_info->self)) || ((shared_region_mapping_t)
1640 sm_info->self == entry->regions_instance)) {
1641 if ((entry->file_object == (int) file_object) &&
1642 (entry->base_address == base_offset)) {
1643 queue_remove(bucket, entry,
1644 load_struct_ptr_t, links);
1645 LSF_DEBUG(("lsf_hash_delete: found it\n"));
1646 return entry;
1647 }
1648 }
1649 }
1650
1651 LSF_DEBUG(("lsf_hash_delete; not found\n"));
1652 return (load_struct_t *)0;
1653 }
1654
1655 /* Inserts a new map_list, (list of loaded file extents), into the */
1656 /* server loaded file hash table. */
1657
1658 static void
1659 lsf_hash_insert(
1660 load_struct_t *entry,
1661 shared_region_task_mappings_t sm_info)
1662 {
1663 shared_file_info_t *shared_file_header;
1664
1665 LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
1666 entry, sm_info, entry->file_object, entry->base_address));
1667
1668 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1669 queue_enter(&shared_file_header->hash
1670 [load_file_hash(entry->file_object,
1671 shared_file_header->hash_size)],
1672 entry, load_struct_ptr_t, links);
1673 }
1674
1675
1676
1677 /*
1678 * lsf_slide:
1679 *
1680 * Look in the shared region, starting from the end, for a place to fit all the
1681 * mappings while respecting their relative offsets.
1682 */
1683 static kern_return_t
1684 lsf_slide(
1685 unsigned int map_cnt,
1686 struct shared_file_mapping_np *mappings_in,
1687 shared_region_task_mappings_t sm_info,
1688 mach_vm_offset_t *base_offset_p)
1689 {
1690 mach_vm_offset_t max_mapping_offset;
1691 int i;
1692 vm_map_entry_t map_entry, prev_entry, next_entry;
1693 mach_vm_offset_t prev_hole_start, prev_hole_end;
1694 mach_vm_offset_t mapping_offset, mapping_end_offset;
1695 mach_vm_offset_t base_offset;
1696 mach_vm_size_t mapping_size;
1697 mach_vm_offset_t wiggle_room, wiggle;
1698 vm_map_t text_map, data_map, map;
1699 vm_named_entry_t region_entry;
1700 ipc_port_t region_handle;
1701 kern_return_t kr;
1702
1703 struct shared_file_mapping_np *mappings, tmp_mapping;
1704 unsigned int sort_index, sorted_index;
1705 vm_map_offset_t sort_min_address;
1706 unsigned int sort_min_index;
1707
1708 /*
1709 * Sort the mappings array, so that we can try and fit them in
1710 * the right order as we progress along the VM maps.
1711 *
1712 * We can't modify the original array (the original order is
1713 * important when doing lookups of the mappings), so copy it first.
1714 */
1715
1716 kr = kmem_alloc(kernel_map,
1717 (vm_offset_t *) &mappings,
1718 (vm_size_t) (map_cnt * sizeof (mappings[0])));
1719 if (kr != KERN_SUCCESS) {
1720 return KERN_NO_SPACE;
1721 }
1722
1723 bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));
1724
1725 max_mapping_offset = 0;
1726 for (sorted_index = 0;
1727 sorted_index < map_cnt;
1728 sorted_index++) {
1729
1730 /* first remaining entry is our new starting point */
1731 sort_min_index = sorted_index;
1732 mapping_end_offset = ((mappings[sort_min_index].sfm_address &
1733 SHARED_TEXT_REGION_MASK) +
1734 mappings[sort_min_index].sfm_size);
1735 sort_min_address = mapping_end_offset;
1736 /* compute the highest mapping_offset as well... */
1737 if (mapping_end_offset > max_mapping_offset) {
1738 max_mapping_offset = mapping_end_offset;
1739 }
1740 /* find the lowest mapping_offset in the remaining entries */
1741 for (sort_index = sorted_index + 1;
1742 sort_index < map_cnt;
1743 sort_index++) {
1744
1745 mapping_end_offset =
1746 ((mappings[sort_index].sfm_address &
1747 SHARED_TEXT_REGION_MASK) +
1748 mappings[sort_index].sfm_size);
1749
1750 if (mapping_end_offset < sort_min_address) {
1751 /* lowest mapping_offset so far... */
1752 sort_min_index = sort_index;
1753 sort_min_address = mapping_end_offset;
1754 }
1755 }
1756 if (sort_min_index != sorted_index) {
1757 /* swap entries */
1758 tmp_mapping = mappings[sort_min_index];
1759 mappings[sort_min_index] = mappings[sorted_index];
1760 mappings[sorted_index] = tmp_mapping;
1761 }
1762
1763 }
1764
1765 max_mapping_offset = vm_map_round_page(max_mapping_offset);
1766
1767 /* start from the end of the shared area */
1768 base_offset = sm_info->text_size;
1769
1770 /* can all the mappings fit ? */
1771 if (max_mapping_offset > base_offset) {
1772 kmem_free(kernel_map,
1773 (vm_offset_t) mappings,
1774 map_cnt * sizeof (mappings[0]));
1775 return KERN_FAILURE;
1776 }
1777
1778 /*
1779 * Align the last mapping to the end of the submaps
1780 * and start from there.
1781 */
1782 base_offset -= max_mapping_offset;
1783
1784 region_handle = (ipc_port_t) sm_info->text_region;
1785 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
1786 text_map = region_entry->backing.map;
1787
1788 region_handle = (ipc_port_t) sm_info->data_region;
1789 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
1790 data_map = region_entry->backing.map;
1791
1792 vm_map_lock_read(text_map);
1793 vm_map_lock_read(data_map);
1794
1795 start_over:
1796 /*
1797 * At first, we can wiggle all the way from our starting point
1798 * (base_offset) towards the start of the map (0), if needed.
1799 */
1800 wiggle_room = base_offset;
1801
1802 for (i = (signed) map_cnt - 1; i >= 0; i--) {
1803 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
1804 /* copy-on-write mappings are in the data submap */
1805 map = data_map;
1806 } else {
1807 /* other mappings are in the text submap */
1808 map = text_map;
1809 }
1810 /* get the offset within the appropriate submap */
1811 mapping_offset = (mappings[i].sfm_address &
1812 SHARED_TEXT_REGION_MASK);
1813 mapping_size = mappings[i].sfm_size;
1814 mapping_end_offset = mapping_offset + mapping_size;
1815 mapping_offset = vm_map_trunc_page(mapping_offset);
1816 mapping_end_offset = vm_map_round_page(mapping_end_offset);
1817 mapping_size = mapping_end_offset - mapping_offset;
1818
1819 for (;;) {
1820 if (vm_map_lookup_entry(map,
1821 base_offset + mapping_offset,
1822 &map_entry)) {
1823 /*
1824 * The start address for that mapping
1825 * is already mapped: no fit.
1826 * Locate the hole immediately before this map
1827 * entry.
1828 */
1829 prev_hole_end = map_entry->vme_start;
1830 prev_entry = map_entry->vme_prev;
1831 if (prev_entry == vm_map_to_entry(map)) {
1832 /* no previous entry */
1833 prev_hole_start = map->min_offset;
1834 } else {
1835 /* previous entry ends here */
1836 prev_hole_start = prev_entry->vme_end;
1837 }
1838 } else {
1839 /*
1840 * The start address for that mapping is not
1841 * mapped.
1842 * Locate the start and end of the hole
1843 * at that location.
1844 */
1845 /* map_entry is the previous entry */
1846 if (map_entry == vm_map_to_entry(map)) {
1847 /* no previous entry */
1848 prev_hole_start = map->min_offset;
1849 } else {
1850 /* previous entry ends there */
1851 prev_hole_start = map_entry->vme_end;
1852 }
1853 next_entry = map_entry->vme_next;
1854 if (next_entry == vm_map_to_entry(map)) {
1855 /* no next entry */
1856 prev_hole_end = map->max_offset;
1857 } else {
1858 prev_hole_end = next_entry->vme_start;
1859 }
1860 }
1861
1862 if (prev_hole_end <= base_offset + mapping_offset) {
1863 /* hole is to our left: try and wiggle to fit */
1864 wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
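/*
 * Example with made-up numbers: if the mapping would start at
 * base_offset + mapping_offset == 0x8000 with mapping_size == 0x2000
 * and the hole to our left ends at prev_hole_end == 0x7000, then
 * wiggle == 0x8000 - 0x7000 + 0x2000 == 0x3000, which slides the
 * mapping down to [0x5000, 0x7000) so that it ends exactly at
 * prev_hole_end.
 */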
1865 if (wiggle > base_offset) {
1866 /* we're getting out of the map */
1867 kr = KERN_FAILURE;
1868 goto done;
1869 }
1870 base_offset -= wiggle;
1871 if (wiggle > wiggle_room) {
1872 /* can't wiggle that much: start over */
1873 goto start_over;
1874 }
1875 /* account for the wiggling done */
1876 wiggle_room -= wiggle;
1877 }
1878
1879 if (prev_hole_end >
1880 base_offset + mapping_offset + mapping_size) {
1881 /*
1882 * The hole extends further to the right
1883 * than what we need. Ignore the extra space.
1884 */
1885 prev_hole_end = (base_offset + mapping_offset +
1886 mapping_size);
1887 }
1888
1889 if (prev_hole_end <
1890 base_offset + mapping_offset + mapping_size) {
1891 /*
1892 * The hole is not big enough to establish
1893 * the mapping right there: wiggle towards
1894 * the beginning of the hole so that the end
1895 * of our mapping fits in the hole...
1896 */
1897 wiggle = base_offset + mapping_offset
1898 + mapping_size - prev_hole_end;
1899 if (wiggle > base_offset) {
1900 /* we're getting out of the map */
1901 kr = KERN_FAILURE;
1902 goto done;
1903 }
1904 base_offset -= wiggle;
1905 if (wiggle > wiggle_room) {
1906 /* can't wiggle that much: start over */
1907 goto start_over;
1908 }
1909 /* account for the wiggling done */
1910 wiggle_room -= wiggle;
1911
1912 /* keep searching from this new base */
1913 continue;
1914 }
1915
1916 if (prev_hole_start > base_offset + mapping_offset) {
1917 /* no hole found: keep looking */
1918 continue;
1919 }
1920
1921 /* compute wiggling room at this hole */
1922 wiggle = base_offset + mapping_offset - prev_hole_start;
1923 if (wiggle < wiggle_room) {
1924 /* less wiggle room than before... */
1925 wiggle_room = wiggle;
1926 }
1927
1928 /* found a hole that fits: skip to next mapping */
1929 break;
1930 } /* while we look for a hole */
1931 } /* for each mapping */
1932
1933 *base_offset_p = base_offset;
1934 kr = KERN_SUCCESS;
1935
1936 done:
1937 vm_map_unlock_read(text_map);
1938 vm_map_unlock_read(data_map);
1939
1940 kmem_free(kernel_map,
1941 (vm_offset_t) mappings,
1942 map_cnt * sizeof (mappings[0]));
1943
1944 return kr;
1945 }
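/*
 * Minimal usage sketch for lsf_slide() (illustrative only, not part of
 * the build).  The caller passes the original, unsorted mappings array;
 * on KERN_SUCCESS, *base_offset_p is an offset within the shared region
 * at which every mapping fits with its relative layout preserved.
 * "my_map_cnt", "my_mappings" and "my_sm_info" are hypothetical
 * placeholders.
 *
 *	mach_vm_offset_t	new_base;
 *	kern_return_t		kr;
 *
 *	kr = lsf_slide(my_map_cnt, my_mappings, my_sm_info, &new_base);
 *	if (kr == KERN_SUCCESS) {
 *		... retry the mappings at new_base, as lsf_map() does ...
 *	}
 */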
1946
1947 /*
1948 * lsf_map:
1949 *
1950 * Attempt to establish the mappings of a split library within the shared region.
1951 */
1952 static kern_return_t
1953 lsf_map(
1954 struct shared_file_mapping_np *mappings,
1955 int map_cnt,
1956 void *file_control,
1957 memory_object_offset_t file_size,
1958 shared_region_task_mappings_t sm_info,
1959 mach_vm_offset_t base_offset,
1960 mach_vm_offset_t *slide_p)
1961 {
1962 load_struct_t *entry;
1963 loaded_mapping_t *file_mapping;
1964 loaded_mapping_t **tptr;
1965 ipc_port_t region_handle;
1966 vm_named_entry_t region_entry;
1967 mach_port_t map_port;
1968 vm_object_t file_object;
1969 kern_return_t kr;
1970 int i;
1971 mach_vm_offset_t original_base_offset;
1972
1973 /* get the VM object from the file's memory object handle */
1974 file_object = memory_object_control_to_vm_object(file_control);
1975
1976 original_base_offset = base_offset;
1977
1978 LSF_DEBUG(("lsf_map"
1979 "(cnt=%d,file=%p,sm_info=%p)"
1980 "\n",
1981 map_cnt, file_object,
1982 sm_info));
1983
1984 restart_after_slide:
1985 /* get a new "load_struct_t" to describe the mappings for that file */
1986 entry = (load_struct_t *)zalloc(lsf_zone);
1987 LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
1988 LSF_DEBUG(("lsf_map"
1989 "(cnt=%d,file=%p,sm_info=%p) "
1990 "entry=%p\n",
1991 map_cnt, file_object,
1992 sm_info, entry));
1993 if (entry == NULL) {
1994 printf("lsf_map: unable to allocate memory\n");
1995 return KERN_NO_SPACE;
1996 }
1997 shared_file_available_hash_ele--;
1998 entry->file_object = (int)file_object;
1999 entry->mapping_cnt = map_cnt;
2000 entry->mappings = NULL;
2001 entry->links.prev = (queue_entry_t) 0;
2002 entry->links.next = (queue_entry_t) 0;
2003 entry->regions_instance = (shared_region_mapping_t)sm_info->self;
2004 entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
2005 entry->file_offset = mappings[0].sfm_file_offset;
2006
2007 /* insert the new file entry in the hash table, for later lookups */
2008 lsf_hash_insert(entry, sm_info);
2009
2010 /* where we should add the next mapping description for that file */
2011 tptr = &(entry->mappings);
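/*
 * "tptr" is a tail pointer: it always holds the address of the slot
 * where the next loaded_mapping_t pointer must be stored (initially
 * &entry->mappings, afterwards &previous_mapping->next), so the
 * per-file list is built in request order without re-walking it.
 */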
2012
2013 entry->base_address = base_offset;
2014
2015
2016 /* establish each requested mapping */
2017 for (i = 0; i < map_cnt; i++) {
2018 mach_vm_offset_t target_address;
2019 mach_vm_offset_t region_mask;
2020
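/*
 * For each requested mapping: copy-on-write mappings are placed in the
 * data submap and all others in the text submap.  A mapping whose start
 * or end would fall outside its segment of the global shared range, or
 * a non-zero-fill mapping that extends past file_size, is rejected and
 * everything established so far is torn down via lsf_unload().
 */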
2021 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
2022 region_handle = (ipc_port_t)sm_info->data_region;
2023 region_mask = SHARED_DATA_REGION_MASK;
2024 if ((((mappings[i].sfm_address + base_offset)
2025 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
2026 (((mappings[i].sfm_address + base_offset +
2027 mappings[i].sfm_size - 1)
2028 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
2029 lsf_unload(file_object,
2030 entry->base_address, sm_info);
2031 return KERN_INVALID_ARGUMENT;
2032 }
2033 } else {
2034 region_mask = SHARED_TEXT_REGION_MASK;
2035 region_handle = (ipc_port_t)sm_info->text_region;
2036 if (((mappings[i].sfm_address + base_offset)
2037 & GLOBAL_SHARED_SEGMENT_MASK) ||
2038 ((mappings[i].sfm_address + base_offset +
2039 mappings[i].sfm_size - 1)
2040 & GLOBAL_SHARED_SEGMENT_MASK)) {
2041 lsf_unload(file_object,
2042 entry->base_address, sm_info);
2043 return KERN_INVALID_ARGUMENT;
2044 }
2045 }
2046 if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
2047 ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
2048 (file_size))) {
2049 lsf_unload(file_object, entry->base_address, sm_info);
2050 return KERN_INVALID_ARGUMENT;
2051 }
2052 target_address = entry->base_address +
2053 ((mappings[i].sfm_address) & region_mask);
2054 if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
2055 map_port = MACH_PORT_NULL;
2056 } else {
2057 map_port = (ipc_port_t) file_object->pager;
2058 }
2059 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2060
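/*
 * Establish the mapping at the pre-computed fixed address in the chosen
 * submap: map_port is the file's pager, or MACH_PORT_NULL for a
 * zero-fill (VM_PROT_ZF) mapping, and the TRUE argument asks for a copy
 * of the object's contents rather than a shared reference.
 */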
2061 if (mach_vm_map(region_entry->backing.map,
2062 &target_address,
2063 vm_map_round_page(mappings[i].sfm_size),
2064 0,
2065 VM_FLAGS_FIXED,
2066 map_port,
2067 mappings[i].sfm_file_offset,
2068 TRUE,
2069 (mappings[i].sfm_init_prot &
2070 (VM_PROT_READ|VM_PROT_EXECUTE)),
2071 (mappings[i].sfm_max_prot &
2072 (VM_PROT_READ|VM_PROT_EXECUTE)),
2073 VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
2074 lsf_unload(file_object, entry->base_address, sm_info);
2075
2076 if (slide_p != NULL) {
2077 /*
2078 * Requested mapping failed but the caller
2079 * is OK with sliding the library in the
2080 * shared region, so let's try and slide it...
2081 */
2082
2083 /* lookup an appropriate spot */
2084 kr = lsf_slide(map_cnt, mappings,
2085 sm_info, &base_offset);
2086 if (kr == KERN_SUCCESS) {
2087 /* try and map it there ... */
2088 entry->base_address = base_offset;
2089 goto restart_after_slide;
2090 }
2091 /* couldn't slide ... */
2092 }
2093
2094 return KERN_FAILURE;
2095 }
2096
2097 /* record this mapping */
2098 file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
2099 if (file_mapping == NULL) {
2100 lsf_unload(file_object, entry->base_address, sm_info);
2101 printf("lsf_map: unable to allocate memory\n");
2102 return KERN_NO_SPACE;
2103 }
2104 shared_file_available_hash_ele--;
2105 file_mapping->mapping_offset = (mappings[i].sfm_address)
2106 & region_mask;
2107 file_mapping->size = mappings[i].sfm_size;
2108 file_mapping->file_offset = mappings[i].sfm_file_offset;
2109 file_mapping->protection = mappings[i].sfm_init_prot;
2110 file_mapping->next = NULL;
2111 LSF_DEBUG(("lsf_map: file_mapping %p "
2112 "for offset=0x%x size=0x%x\n",
2113 file_mapping, file_mapping->mapping_offset,
2114 file_mapping->size));
2115
2116 /* and link it to the file entry */
2117 *tptr = file_mapping;
2118
2119 /* where to put the next mapping's description */
2120 tptr = &(file_mapping->next);
2121 }
2122
2123 if (slide_p != NULL) {
2124 *slide_p = base_offset - original_base_offset;
2125 }
2126
2127 if (sm_info->flags & SHARED_REGION_STANDALONE) {
2128 /*
2129 * We have a standalone and private shared region, so we
2130 * don't really need to keep the information about each file
2131 * and each mapping. Just deallocate it all.
2132 * XXX we still have the hash table, though...
2133 */
2134 lsf_deallocate(file_object, entry->base_address, sm_info,
2135 FALSE);
2136 }
2137
2138 LSF_DEBUG(("lsf_map: done\n"));
2139 return KERN_SUCCESS;
2140 }
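/*
 * Hypothetical shape of the input lsf_map() consumes (illustrative
 * only; every value below is made up).  A simple split library might
 * describe one read/execute text mapping and one copy-on-write data
 * mapping:
 *
 *	struct shared_file_mapping_np m[2];
 *
 *	m[0].sfm_address     = (text segment address);
 *	m[0].sfm_size        = 0x3000;
 *	m[0].sfm_file_offset = 0x0;
 *	m[0].sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE;
 *	m[0].sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE;
 *
 *	m[1].sfm_address     = (data segment address);
 *	m[1].sfm_size        = 0x1000;
 *	m[1].sfm_file_offset = 0x3000;
 *	m[1].sfm_max_prot    = VM_PROT_READ | VM_PROT_WRITE;
 *	m[1].sfm_init_prot   = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COW;
 *
 * Non-VM_PROT_ZF mappings must lie within file_size; each mapping that
 * is successfully established is recorded on the entry's mappings list.
 */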
2141
2142
2143 /* Finds the file_object's extent list in the shared memory hash table. */
2144 /* If one is found, the associated extents in shared memory are deallocated */
2145 /* and the extent list is freed. */
2146
2147 static void
2148 lsf_unload(
2149 void *file_object,
2150 vm_offset_t base_offset,
2151 shared_region_task_mappings_t sm_info)
2152 {
2153 lsf_deallocate(file_object, base_offset, sm_info, TRUE);
2154 }
2155
2156 /*
2157 * lsf_deallocate:
2158 *
2159 * Deallocates all the "shared region" internal data structures describing
2160 * the file and its mappings.
2161 * Also deallocate the actual file mappings if requested ("unload" arg).
2162 */
2163 static void
2164 lsf_deallocate(
2165 void *file_object,
2166 vm_offset_t base_offset,
2167 shared_region_task_mappings_t sm_info,
2168 boolean_t unload)
2169 {
2170 load_struct_t *entry;
2171 loaded_mapping_t *map_ele;
2172 loaded_mapping_t *back_ptr;
2173
2174 LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
2175 file_object, base_offset, sm_info, unload));
2176 entry = lsf_hash_delete(file_object, base_offset, sm_info);
2177 if(entry) {
2178 map_ele = entry->mappings;
2179 while(map_ele != NULL) {
2180 if (unload) {
2181 ipc_port_t region_handle;
2182 vm_named_entry_t region_entry;
2183
2184 if(map_ele->protection & VM_PROT_COW) {
2185 region_handle = (ipc_port_t)
2186 sm_info->data_region;
2187 } else {
2188 region_handle = (ipc_port_t)
2189 sm_info->text_region;
2190 }
2191 region_entry = (vm_named_entry_t)
2192 region_handle->ip_kobject;
2193
2194 vm_deallocate(region_entry->backing.map,
2195 (entry->base_address +
2196 map_ele->mapping_offset),
2197 map_ele->size);
2198 }
2199 back_ptr = map_ele;
2200 map_ele = map_ele->next;
2201 LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
2202 "offset 0x%x size 0x%x\n",
2203 back_ptr, back_ptr->mapping_offset,
2204 back_ptr->size));
2205 zfree(lsf_zone, back_ptr);
2206 shared_file_available_hash_ele++;
2207 }
2208 LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
2209 LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p\n", entry));
2210 zfree(lsf_zone, entry);
2211 shared_file_available_hash_ele++;
2212 }
2213 LSF_DEBUG(("lsf_deallocate: done\n"));
2214 }
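/*
 * Worked example of the teardown above: for an entry with three
 * loaded_mapping_t elements and unload == TRUE, lsf_deallocate() issues
 * three vm_deallocate() calls (one per extent) and frees four zone
 * elements (the three mappings plus the load_struct_t itself), so
 * shared_file_available_hash_ele is incremented by four.
 */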
2215
2216 /* Returns how full the lsf_zone mapping pool is, as a percentage (0-100). */
2217 unsigned int
2218 lsf_mapping_pool_gauge(void)
2219 {
2220 return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
2221 }
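/*
 * Example with made-up numbers: with elem_size == 64 bytes, count ==
 * 1000 elements in use and max_size == 128KB (131072 bytes), the gauge
 * reports (1000 * 64 * 100) / 131072 == 48 (percent, integer division).
 */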