1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 *
24 * File: vm/vm_shared_memory_server.c
25 * Author: Chris Youngworth
26 *
27 * Support routines for an in-kernel shared memory allocator
28 */
29
30 #include <debug.h>
31
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34 #include <mach/vm_inherit.h>
35 #include <mach/vm_map.h>
36 #include <machine/cpu_capabilities.h>
37
38 #include <kern/kern_types.h>
39 #include <kern/ipc_kobject.h>
40 #include <kern/thread.h>
41 #include <kern/zalloc.h>
42 #include <kern/kalloc.h>
43
44 #include <ipc/ipc_types.h>
45 #include <ipc/ipc_port.h>
46
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_page.h>
50
51 #include <mach/mach_vm.h>
52 #include <mach/shared_memory_server.h>
53 #include <vm/vm_shared_memory_server.h>
54
55 #if DEBUG
56 int lsf_debug = 0;
57 int lsf_alloc_debug = 0;
58 #define LSF_DEBUG(args) \
59 MACRO_BEGIN \
60 if (lsf_debug) { \
61 kprintf args; \
62 } \
63 MACRO_END
64 #define LSF_ALLOC_DEBUG(args) \
65 MACRO_BEGIN \
66 if (lsf_alloc_debug) { \
67 kprintf args; \
68 } \
69 MACRO_END
70 #else /* DEBUG */
71 #define LSF_DEBUG(args)
72 #define LSF_ALLOC_DEBUG(args)
73 #endif /* DEBUG */
74
75 /* forward declarations */
76 static kern_return_t
77 shared_region_object_create(
78 vm_size_t size,
79 ipc_port_t *object_handle);
80
81 static kern_return_t
82 shared_region_mapping_dealloc_lock(
83 shared_region_mapping_t shared_region,
84 int need_sfh_lock,
85 int need_drl_lock);
86
87
88 static kern_return_t
89 shared_file_init(
90 ipc_port_t *text_region_handle,
91 vm_size_t text_region_size,
92 ipc_port_t *data_region_handle,
93 vm_size_t data_region_size,
94 vm_offset_t *file_mapping_array);
95
96 static kern_return_t
97 shared_file_header_init(
98 shared_file_info_t *shared_file_header);
99
100 static load_struct_t *
101 lsf_hash_lookup(
102 queue_head_t *hash_table,
103 void *file_object,
104 vm_offset_t recognizableOffset,
105 int size,
106 boolean_t regular,
107 boolean_t alternate,
108 shared_region_task_mappings_t sm_info);
109
110 static load_struct_t *
111 lsf_hash_delete(
112 void *file_object,
113 vm_offset_t base_offset,
114 shared_region_task_mappings_t sm_info);
115
116 static void
117 lsf_hash_insert(
118 load_struct_t *entry,
119 shared_region_task_mappings_t sm_info);
120
121 static kern_return_t
122 lsf_slide(
123 unsigned int map_cnt,
124 struct shared_file_mapping_np *mappings,
125 shared_region_task_mappings_t sm_info,
126 mach_vm_offset_t *base_offset_p);
127
128 static kern_return_t
129 lsf_map(
130 struct shared_file_mapping_np *mappings,
131 int map_cnt,
132 void *file_control,
133 memory_object_size_t file_size,
134 shared_region_task_mappings_t sm_info,
135 mach_vm_offset_t base_offset,
136 mach_vm_offset_t *slide_p);
137
138 static void
139 lsf_unload(
140 void *file_object,
141 vm_offset_t base_offset,
142 shared_region_task_mappings_t sm_info);
143
144 static void
145 lsf_deallocate(
146 void *file_object,
147 vm_offset_t base_offset,
148 shared_region_task_mappings_t sm_info,
149 boolean_t unload);
150
151
152 #define load_file_hash(file_object, size) \
153 ((((natural_t)file_object) & 0xffffff) % size)
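/*
 * load_file_hash() buckets a file by the low 24 bits of its VM object
 * address, modulo the number of hash buckets.  Minimal illustrative use,
 * mirroring lsf_hash_insert() further below ("sfh" is a hypothetical
 * shared_file_info_t pointer, not a name used in this file):
 *
 *	queue_t bucket;
 *
 *	bucket = &sfh->hash[load_file_hash(entry->file_object,
 *					   sfh->hash_size)];
 *	queue_enter(bucket, entry, load_struct_ptr_t, links);
 */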
154
155 /* Implementation */
156 vm_offset_t shared_file_text_region;
157 vm_offset_t shared_file_data_region;
158
159 ipc_port_t shared_text_region_handle;
160 ipc_port_t shared_data_region_handle;
161 vm_offset_t shared_file_mapping_array = 0;
162
163 shared_region_mapping_t default_environment_shared_regions = NULL;
164 static decl_mutex_data(,default_regions_list_lock_data)
165
166 #define default_regions_list_lock() \
167 mutex_lock(&default_regions_list_lock_data)
168 #define default_regions_list_lock_try() \
169 mutex_try(&default_regions_list_lock_data)
170 #define default_regions_list_unlock() \
171 mutex_unlock(&default_regions_list_lock_data)
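/*
 * default_regions_list_lock protects the default_environment_shared_regions
 * list above.  Routines that take a "need_drl_lock" argument use it to
 * indicate whether the caller already holds this lock, so the list can be
 * manipulated safely from contexts that acquired it earlier in the call
 * chain (see shared_region_mapping_dealloc_lock() and
 * remove_default_shared_region_lock()).
 */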
172
173
174 ipc_port_t sfma_handle = NULL;
175 zone_t lsf_zone;
176
177 int shared_file_available_hash_ele;
178
179 /* com region support */
180 ipc_port_t com_region_handle32 = NULL;
181 ipc_port_t com_region_handle64 = NULL;
182 vm_map_t com_region_map32 = NULL;
183 vm_map_t com_region_map64 = NULL;
184 vm_size_t com_region_size = _COMM_PAGE_AREA_LENGTH;
185 shared_region_mapping_t com_mapping_resource = NULL;
186
187
188 #if DEBUG
189 int shared_region_debug = 0;
190 #endif /* DEBUG */
191
192
193 kern_return_t
194 vm_get_shared_region(
195 task_t task,
196 shared_region_mapping_t *shared_region)
197 {
198 *shared_region = (shared_region_mapping_t) task->system_shared_region;
199 if (*shared_region) {
200 assert((*shared_region)->ref_count > 0);
201 }
202 SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
203 task, *shared_region));
204 return KERN_SUCCESS;
205 }
206
207 kern_return_t
208 vm_set_shared_region(
209 task_t task,
210 shared_region_mapping_t shared_region)
211 {
212 SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
213 "shared_region=%p)\n",
214 task, shared_region));
215 if (shared_region) {
216 assert(shared_region->ref_count > 0);
217 }
218 task->system_shared_region = shared_region;
219 return KERN_SUCCESS;
220 }
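/*
 * Note: vm_get_shared_region() and vm_set_shared_region() only read and
 * write task->system_shared_region; neither takes nor drops a reference on
 * the shared_region_mapping.  Callers manage the reference count themselves
 * (see shared_file_boot_time_init(), which calls shared_region_mapping_ref()
 * before installing a region with vm_set_shared_region()).
 */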
221
222 /*
223 * shared_region_object_chain_detach:
224 *
225 * Mark the shared region as being detached or standalone. This means
226 * that we won't keep track of which file is mapped and how, for this shared
227 * region. And we don't have a "shadow" shared region.
228 * This is used when we clone a private shared region and we intend to remove
229 * some mappings from it. It won't need to maintain mappings info because it's
230 * now private. It can't have a "shadow" shared region because we don't want
231 * to see the shadow of the mappings we're about to remove.
232 */
233 void
234 shared_region_object_chain_detached(
235 shared_region_mapping_t target_region)
236 {
237 shared_region_mapping_lock(target_region);
238 target_region->flags |= SHARED_REGION_STANDALONE;
239 shared_region_mapping_unlock(target_region);
240 }
241
242 /*
243 * shared_region_object_chain_attach:
244 *
245 * Link "target_region" to "object_chain_region". "object_chain_region"
246 * is treated as a shadow of "target_region" for the purpose of looking up
247 * mappings. Since the "target_region" preserves all the mappings of the
248 * older "object_chain_region", we won't duplicate all the mappings info and
249 * we'll just lookup the next region in the "object_chain" if we can't find
250 * what we're looking for in the "target_region". See lsf_hash_lookup().
251 */
252 kern_return_t
253 shared_region_object_chain_attach(
254 shared_region_mapping_t target_region,
255 shared_region_mapping_t object_chain_region)
256 {
257 shared_region_object_chain_t object_ele;
258
259 SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
260 "target_region=%p, object_chain_region=%p\n",
261 target_region, object_chain_region));
262 assert(target_region->ref_count > 0);
263 assert(object_chain_region->ref_count > 0);
264 if(target_region->object_chain)
265 return KERN_FAILURE;
266 object_ele = (shared_region_object_chain_t)
267 kalloc(sizeof (struct shared_region_object_chain));
	if (object_ele == NULL)
		return KERN_FAILURE;
268 shared_region_mapping_lock(object_chain_region);
269 target_region->object_chain = object_ele;
270 object_ele->object_chain_region = object_chain_region;
271 object_ele->next = object_chain_region->object_chain;
272 object_ele->depth = object_chain_region->depth;
273 object_chain_region->depth++;
274 target_region->alternate_next = object_chain_region->alternate_next;
275 shared_region_mapping_unlock(object_chain_region);
276 return KERN_SUCCESS;
277 }
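/*
 * Illustrative sketch of how a chained region is typically set up (not a
 * verbatim caller; the variable names are hypothetical).  Lookups that miss
 * in "new_region" then fall through to "old_region" via lsf_hash_lookup()'s
 * walk of the object_chain:
 *
 *	shared_region_mapping_t old_region, new_region;
 *
 *	vm_get_shared_region(current_task(), &old_region);
 *	shared_file_create_system_region(&new_region);
 *	shared_region_object_chain_attach(new_region, old_region);
 */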
278
279 /* LP64todo - need 64-bit safe version */
280 kern_return_t
281 shared_region_mapping_create(
282 ipc_port_t text_region,
283 vm_size_t text_size,
284 ipc_port_t data_region,
285 vm_size_t data_size,
286 vm_offset_t region_mappings,
287 vm_offset_t client_base,
288 shared_region_mapping_t *shared_region,
289 vm_offset_t alt_base,
290 vm_offset_t alt_next)
291 {
292 SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
293 *shared_region = (shared_region_mapping_t)
294 kalloc(sizeof (struct shared_region_mapping));
295 if(*shared_region == NULL) {
296 SHARED_REGION_DEBUG(("shared_region_mapping_create: "
297 "failure\n"));
298 return KERN_FAILURE;
299 }
300 shared_region_mapping_lock_init((*shared_region));
301 (*shared_region)->text_region = text_region;
302 (*shared_region)->text_size = text_size;
303 (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
304 (*shared_region)->system = cpu_type();
305 (*shared_region)->data_region = data_region;
306 (*shared_region)->data_size = data_size;
307 (*shared_region)->region_mappings = region_mappings;
308 (*shared_region)->client_base = client_base;
309 (*shared_region)->ref_count = 1;
310 (*shared_region)->next = NULL;
311 (*shared_region)->object_chain = NULL;
312 (*shared_region)->self = *shared_region;
313 (*shared_region)->flags = 0;
314 (*shared_region)->depth = 0;
315 (*shared_region)->default_env_list = NULL;
316 (*shared_region)->alternate_base = alt_base;
317 (*shared_region)->alternate_next = alt_next;
318 SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
319 *shared_region));
320 return KERN_SUCCESS;
321 }
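/*
 * The new mapping starts with ref_count 1 and takes over the send rights
 * for "text_region" and "data_region"; they are released again in
 * shared_region_mapping_dealloc_lock() when the last reference goes away.
 * See shared_file_create_system_region() below for a typical call, using
 * the handles and mapping array returned by shared_file_init().
 */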
322
323 /* LP64todo - need 64-bit safe version */
324 kern_return_t
325 shared_region_mapping_info(
326 shared_region_mapping_t shared_region,
327 ipc_port_t *text_region,
328 vm_size_t *text_size,
329 ipc_port_t *data_region,
330 vm_size_t *data_size,
331 vm_offset_t *region_mappings,
332 vm_offset_t *client_base,
333 vm_offset_t *alt_base,
334 vm_offset_t *alt_next,
335 unsigned int *fs_base,
336 unsigned int *system,
337 int *flags,
338 shared_region_mapping_t *next)
339 {
340 shared_region_mapping_lock(shared_region);
341
342 SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
343 shared_region));
344 assert(shared_region->ref_count > 0);
345 *text_region = shared_region->text_region;
346 *text_size = shared_region->text_size;
347 *data_region = shared_region->data_region;
348 *data_size = shared_region->data_size;
349 *region_mappings = shared_region->region_mappings;
350 *client_base = shared_region->client_base;
351 *alt_base = shared_region->alternate_base;
352 *alt_next = shared_region->alternate_next;
353 *flags = shared_region->flags;
354 *fs_base = shared_region->fs_base;
355 *system = shared_region->system;
356 *next = shared_region->next;
357
358 shared_region_mapping_unlock(shared_region);
	return KERN_SUCCESS;
359 }
360
361 kern_return_t
362 shared_region_mapping_ref(
363 shared_region_mapping_t shared_region)
364 {
365 SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
366 "ref_count=%d + 1\n",
367 shared_region,
368 shared_region ? shared_region->ref_count : 0));
369 if(shared_region == NULL)
370 return KERN_SUCCESS;
371 assert(shared_region->ref_count > 0);
372 hw_atomic_add(&shared_region->ref_count, 1);
373 return KERN_SUCCESS;
374 }
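/*
 * shared_region_mapping_dealloc_lock() drops one reference on the region.
 * When the count reaches zero, the region's pmap entries are torn down,
 * its text/data named-entry ports are released and the structure is freed;
 * the loop then continues down the object_chain, so each shadowed region
 * loses the reference this one held on it.  A region left with a single
 * reference that is a SYSTEM region and not yet STALE is only kept alive
 * by the default environment list, so it is removed from that list here.
 */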
375
376 static kern_return_t
377 shared_region_mapping_dealloc_lock(
378 shared_region_mapping_t shared_region,
379 int need_sfh_lock,
380 int need_drl_lock)
381 {
382 struct shared_region_task_mappings sm_info;
383 shared_region_mapping_t next = NULL;
384 int ref_count;
385
386 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
387 "(shared_region=%p,%d,%d) ref_count=%d\n",
388 shared_region, need_sfh_lock, need_drl_lock,
389 shared_region ? shared_region->ref_count : 0));
390 while (shared_region) {
391 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
392 "ref_count=%d\n",
393 shared_region, shared_region->ref_count));
394 assert(shared_region->ref_count > 0);
395 if ((ref_count =
396 hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
397 shared_region_mapping_lock(shared_region);
398
399 sm_info.text_region = shared_region->text_region;
400 sm_info.text_size = shared_region->text_size;
401 sm_info.data_region = shared_region->data_region;
402 sm_info.data_size = shared_region->data_size;
403 sm_info.region_mappings = shared_region->region_mappings;
404 sm_info.client_base = shared_region->client_base;
405 sm_info.alternate_base = shared_region->alternate_base;
406 sm_info.alternate_next = shared_region->alternate_next;
407 sm_info.flags = shared_region->flags;
408 sm_info.self = (vm_offset_t)shared_region;
409
410 if(shared_region->region_mappings) {
411 lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
412 }
413 if(((vm_named_entry_t)
414 (shared_region->text_region->ip_kobject))
415 ->backing.map->pmap) {
416 pmap_remove(((vm_named_entry_t)
417 (shared_region->text_region->ip_kobject))
418 ->backing.map->pmap,
419 sm_info.client_base,
420 sm_info.client_base + sm_info.text_size);
421 }
422 ipc_port_release_send(shared_region->text_region);
423 if(shared_region->data_region)
424 ipc_port_release_send(shared_region->data_region);
425 if (shared_region->object_chain) {
426 next = shared_region->object_chain->object_chain_region;
427 kfree(shared_region->object_chain,
428 sizeof (struct shared_region_object_chain));
429 } else {
430 next = NULL;
431 }
432 shared_region_mapping_unlock(shared_region);
433 SHARED_REGION_DEBUG(
434 ("shared_region_mapping_dealloc_lock(%p): "
435 "freeing\n",
436 shared_region));
437 bzero((void *)shared_region,
438 sizeof (*shared_region)); /* FBDP debug */
439 kfree(shared_region,
440 sizeof (struct shared_region_mapping));
441 shared_region = next;
442 } else {
443 /* Stale indicates that a system region is no */
444 /* longer in the default environment list. */
445 if((ref_count == 1) &&
446 (shared_region->flags & SHARED_REGION_SYSTEM)
447 && !(shared_region->flags & SHARED_REGION_STALE)) {
448 SHARED_REGION_DEBUG(
449 ("shared_region_mapping_dealloc_lock"
450 "(%p): removing stale\n",
451 shared_region));
452 remove_default_shared_region_lock(shared_region,need_sfh_lock, need_drl_lock);
453 }
454 break;
455 }
456 }
457 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
458 shared_region));
459 return KERN_SUCCESS;
460 }
461
462 /*
463 * Stub function; always indicates that the lock needs to be taken in the
464 * call to lsf_remove_regions_mappings_lock().
465 */
466 kern_return_t
467 shared_region_mapping_dealloc(
468 shared_region_mapping_t shared_region)
469 {
470 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
471 "(shared_region=%p)\n",
472 shared_region));
473 if (shared_region) {
474 assert(shared_region->ref_count > 0);
475 }
476 return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
477 }
478
479 static
480 kern_return_t
481 shared_region_object_create(
482 vm_size_t size,
483 ipc_port_t *object_handle)
484 {
485 vm_named_entry_t user_entry;
486 ipc_port_t user_handle;
487
488 ipc_port_t previous;
489 vm_map_t new_map;
490
491 user_entry = (vm_named_entry_t)
492 kalloc(sizeof (struct vm_named_entry));
493 if(user_entry == NULL) {
494 return KERN_FAILURE;
495 }
496 named_entry_lock_init(user_entry);
497 user_handle = ipc_port_alloc_kernel();
498
499
500 ip_lock(user_handle);
501
502 /* make a sonce right */
503 user_handle->ip_sorights++;
504 ip_reference(user_handle);
505
506 user_handle->ip_destination = IP_NULL;
507 user_handle->ip_receiver_name = MACH_PORT_NULL;
508 user_handle->ip_receiver = ipc_space_kernel;
509
510 /* make a send right */
511 user_handle->ip_mscount++;
512 user_handle->ip_srights++;
513 ip_reference(user_handle);
514
515 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
516 /* nsrequest unlocks user_handle */
517
518 /* Create a named object based on a submap of specified size */
519
520 new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
521 user_entry->backing.map = new_map;
522 user_entry->internal = TRUE;
523 user_entry->is_sub_map = TRUE;
524 user_entry->is_pager = FALSE;
525 user_entry->offset = 0;
526 user_entry->protection = VM_PROT_ALL;
527 user_entry->size = size;
528 user_entry->ref_count = 1;
529
530 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
531 IKOT_NAMED_ENTRY);
532 *object_handle = user_handle;
533 return KERN_SUCCESS;
534 }
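/*
 * shared_region_object_create() builds a named-entry port backed by a
 * fresh submap of the requested size: the port gets a send right and a
 * no-senders notification, and the vm_named_entry is marked as an internal
 * submap covering [0, size) with VM_PROT_ALL.  The caller owns the
 * returned send right (see shared_file_init() and
 * shared_com_boot_time_init() for its two users in this file).
 */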
535
536 /* Called for the non-default, private branch shared region support. */
537 /* The system default fields for fs_base and system are not relevant */
538 /* here because the system default flag is not set. */
539 kern_return_t
540 shared_file_create_system_region(
541 shared_region_mapping_t *shared_region)
542 {
543 ipc_port_t text_handle;
544 ipc_port_t data_handle;
545 long text_size;
546 long data_size;
547 vm_offset_t mapping_array;
548 kern_return_t kret;
549
550 SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));
551
552 text_size = 0x10000000;
553 data_size = 0x10000000;
554
555 kret = shared_file_init(&text_handle,
556 text_size, &data_handle, data_size, &mapping_array);
557 if(kret) {
558 SHARED_REGION_DEBUG(("shared_file_create_system_region: "
559 "shared_file_init failed kret=0x%x\n",
560 kret));
561 return kret;
562 }
563 kret = shared_region_mapping_create(text_handle,
564 text_size, data_handle, data_size, mapping_array,
565 GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
566 SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
567 if(kret) {
568 SHARED_REGION_DEBUG(("shared_file_create_system_region: "
569 "shared_region_mapping_create failed "
570 "kret=0x%x\n",
571 kret));
572 return kret;
573 }
574 (*shared_region)->flags = 0;
575 if(com_mapping_resource) {
576 shared_region_mapping_ref(com_mapping_resource);
577 (*shared_region)->next = com_mapping_resource;
578 }
579
580 SHARED_REGION_DEBUG(("shared_file_create_system_region() "
581 "-> shared_region=%p\n",
582 *shared_region));
583 return KERN_SUCCESS;
584 }
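/*
 * Minimal sketch of how a private region produced here might be installed
 * for a task (the caller shown is hypothetical, but it follows the
 * reference-counting rules used elsewhere in this file):
 *
 *	shared_region_mapping_t private_region;
 *
 *	if (shared_file_create_system_region(&private_region) != KERN_SUCCESS)
 *		return KERN_FAILURE;
 *	vm_set_shared_region(current_task(), private_region);
 *
 * The region is created with one reference, which the task now logically
 * owns; dropping it later goes through shared_region_mapping_dealloc().
 */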
585
586 /*
587 * Load a new default for a specified environment into the default shared
588 * regions list. If a previous default exists for the environment specification,
589 * it is returned along with its reference. It is expected that the new
590 * system region structure passes a reference.
591 */
592
593 shared_region_mapping_t
594 update_default_shared_region(
595 shared_region_mapping_t new_system_region)
596 {
597 shared_region_mapping_t old_system_region;
598 unsigned int fs_base;
599 unsigned int system;
600
601 SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
602 new_system_region));
603 assert(new_system_region->ref_count > 0);
604 fs_base = new_system_region->fs_base;
605 system = new_system_region->system;
606 new_system_region->flags |= SHARED_REGION_SYSTEM;
607 default_regions_list_lock();
608 old_system_region = default_environment_shared_regions;
609
610 if((old_system_region != NULL) &&
611 (old_system_region->fs_base == fs_base) &&
612 (old_system_region->system == system)) {
613 new_system_region->default_env_list =
614 old_system_region->default_env_list;
615 old_system_region->default_env_list = NULL;
616 default_environment_shared_regions = new_system_region;
617 old_system_region->flags |= SHARED_REGION_STALE;
618 default_regions_list_unlock();
619 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
620 "old=%p stale 1\n",
621 new_system_region, old_system_region));
622 assert(old_system_region->ref_count > 0);
623 return old_system_region;
624 }
625 if (old_system_region) {
626 while(old_system_region->default_env_list != NULL) {
627 if((old_system_region->default_env_list->fs_base == fs_base) &&
628 (old_system_region->default_env_list->system == system)) {
629 shared_region_mapping_t tmp_system_region;
630
631 tmp_system_region =
632 old_system_region->default_env_list;
633 new_system_region->default_env_list =
634 tmp_system_region->default_env_list;
635 tmp_system_region->default_env_list = NULL;
636 old_system_region->default_env_list =
637 new_system_region;
638 old_system_region = tmp_system_region;
639 old_system_region->flags |= SHARED_REGION_STALE;
640 default_regions_list_unlock();
641 SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
642 ": old=%p stale 2\n",
643 new_system_region,
644 old_system_region));
645 assert(old_system_region->ref_count > 0);
646 return old_system_region;
647 }
648 old_system_region = old_system_region->default_env_list;
649 }
650 }
651 /* If we get here, we are at the end of the system list and we */
652 /* did not find a pre-existing entry */
653 if(old_system_region) {
654 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
655 "adding after old=%p\n",
656 new_system_region, old_system_region));
657 assert(old_system_region->ref_count > 0);
658 old_system_region->default_env_list = new_system_region;
659 } else {
660 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
661 "new default\n",
662 new_system_region));
663 default_environment_shared_regions = new_system_region;
664 }
665 assert(new_system_region->ref_count > 0);
666 default_regions_list_unlock();
667 return NULL;
668 }
669
670 /*
671 * Look up a system_shared_region for the specified environment. If one is
672 * found, it is returned along with a reference against the structure.
673 */
674
675 shared_region_mapping_t
676 lookup_default_shared_region(
677 unsigned int fs_base,
678 unsigned int system)
679 {
680 shared_region_mapping_t system_region;
681 default_regions_list_lock();
682 system_region = default_environment_shared_regions;
683
684 SHARED_REGION_DEBUG(("lookup_default_shared_region"
685 "(base=0x%x, system=0x%x)\n",
686 fs_base, system));
687 while(system_region != NULL) {
688 SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
689 ": system_region=%p base=0x%x system=0x%x"
690 " ref_count=%d\n",
691 fs_base, system, system_region,
692 system_region->fs_base,
693 system_region->system,
694 system_region->ref_count));
695 assert(system_region->ref_count > 0);
696 if((system_region->fs_base == fs_base) &&
697 (system_region->system == system)) {
698 break;
699 }
700 system_region = system_region->default_env_list;
701 }
702 if(system_region)
703 shared_region_mapping_ref(system_region);
704 default_regions_list_unlock();
705 SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
706 fs_base, system, system_region));
707 return system_region;
708 }
709
710 /*
711 * remove a system_region default if it appears in the default regions list.
712 * Drop a reference on removal.
713 */
714
715 __private_extern__ void
716 remove_default_shared_region_lock(
717 shared_region_mapping_t system_region,
718 int need_sfh_lock,
719 int need_drl_lock)
720 {
721 shared_region_mapping_t old_system_region;
722
723 SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
724 "(system_region=%p, %d, %d)\n",
725 system_region, need_sfh_lock, need_drl_lock));
726 if (need_drl_lock) {
727 default_regions_list_lock();
728 }
729 old_system_region = default_environment_shared_regions;
730
731 if(old_system_region == NULL) {
732 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
733 "-> default_env=NULL\n",
734 system_region));
735 if (need_drl_lock) {
736 default_regions_list_unlock();
737 }
738 return;
739 }
740
741 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
742 "default_env=%p\n",
743 system_region, old_system_region));
744 assert(old_system_region->ref_count > 0);
745 if (old_system_region == system_region) {
746 default_environment_shared_regions
747 = old_system_region->default_env_list;
748 old_system_region->default_env_list = NULL;
749 old_system_region->flags |= SHARED_REGION_STALE;
750 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
751 "old=%p ref_count=%d STALE\n",
752 system_region, old_system_region,
753 old_system_region->ref_count));
754 shared_region_mapping_dealloc_lock(old_system_region,
755 need_sfh_lock,
756 0);
757 if (need_drl_lock) {
758 default_regions_list_unlock();
759 }
760 return;
761 }
762
763 while(old_system_region->default_env_list != NULL) {
764 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
765 "old=%p->default_env=%p\n",
766 system_region, old_system_region,
767 old_system_region->default_env_list));
768 assert(old_system_region->default_env_list->ref_count > 0);
769 if(old_system_region->default_env_list == system_region) {
770 shared_region_mapping_t dead_region;
771 dead_region = old_system_region->default_env_list;
772 old_system_region->default_env_list =
773 dead_region->default_env_list;
774 dead_region->default_env_list = NULL;
775 dead_region->flags |= SHARED_REGION_STALE;
776 SHARED_REGION_DEBUG(
777 ("remove_default_shared_region_lock(%p): "
778 "dead=%p ref_count=%d stale\n",
779 system_region, dead_region,
780 dead_region->ref_count));
781 shared_region_mapping_dealloc_lock(dead_region,
782 need_sfh_lock,
783 0);
784 if (need_drl_lock) {
785 default_regions_list_unlock();
786 }
787 return;
788 }
789 old_system_region = old_system_region->default_env_list;
790 }
791 if (need_drl_lock) {
792 default_regions_list_unlock();
793 }
794 }
795
796 /*
797 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
798 * the only caller. Remove this stub function and the corresponding symbol
799 * export for Merlot.
800 */
801 void
802 remove_default_shared_region(
803 shared_region_mapping_t system_region)
804 {
805 SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
806 system_region));
807 if (system_region) {
808 assert(system_region->ref_count > 0);
809 }
810 remove_default_shared_region_lock(system_region, 1, 1);
811 }
812
813 void
814 remove_all_shared_regions(void)
815 {
816 shared_region_mapping_t system_region;
817 shared_region_mapping_t next_system_region;
818
819 SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
820 LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
821 LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
822 default_regions_list_lock();
823 system_region = default_environment_shared_regions;
824
825 if(system_region == NULL) {
826 default_regions_list_unlock();
827 return;
828 }
829
830 while(system_region != NULL) {
831 next_system_region = system_region->default_env_list;
832 system_region->default_env_list = NULL;
833 system_region->flags |= SHARED_REGION_STALE;
834 SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
835 "%p ref_count=%d stale\n",
836 system_region, system_region->ref_count));
837 assert(system_region->ref_count > 0);
838 shared_region_mapping_dealloc_lock(system_region, 1, 0);
839 system_region = next_system_region;
840 }
841 default_environment_shared_regions = NULL;
842 default_regions_list_unlock();
843 SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
844 LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
845 LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
846 }
847
848 /* shared_com_boot_time_init initializes the common page shared data and */
849 /* text region. This region is semi-independent of the split libraries */
850 /* and so its policies have to be handled differently by the code that */
851 /* manipulates the mapping of shared region environments. However, */
852 /* the shared region delivery system supports both kinds of region. */
853 void shared_com_boot_time_init(void); /* forward */
854 void
855 shared_com_boot_time_init(void)
856 {
857 kern_return_t kret;
858 vm_named_entry_t named_entry;
859
860 SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
861 if(com_region_handle32) {
862 panic("shared_com_boot_time_init: "
863 "com_region_handle32 already set\n");
864 }
865 if(com_region_handle64) {
866 panic("shared_com_boot_time_init: "
867 "com_region_handle64 already set\n");
868 }
869
870 /* create com page regions, 1 each for 32 and 64-bit code */
871 if((kret = shared_region_object_create(
872 com_region_size,
873 &com_region_handle32))) {
874 panic("shared_com_boot_time_init: "
875 "unable to create 32-bit comm page\n");
876 return;
877 }
878 if((kret = shared_region_object_create(
879 com_region_size,
880 &com_region_handle64))) {
881 panic("shared_com_boot_time_init: "
882 "unable to create 64-bit comm page\n");
883 return;
884 }
885
886 /* now export the underlying region/map */
887 named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
888 com_region_map32 = named_entry->backing.map;
889 named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
890 com_region_map64 = named_entry->backing.map;
891
892 /* wrap the com region in its own shared file mapping structure */
893 /* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
894 kret = shared_region_mapping_create(com_region_handle32,
895 com_region_size, NULL, 0, 0,
896 _COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
897 0, 0);
898 if (kret) {
899 panic("shared_region_mapping_create failed for commpage");
900 }
901 }
902
903 void
904 shared_file_boot_time_init(
905 unsigned int fs_base,
906 unsigned int system)
907 {
908 long text_region_size;
909 long data_region_size;
910 shared_region_mapping_t new_system_region;
911 shared_region_mapping_t old_default_env;
912
913 SHARED_REGION_DEBUG(("shared_file_boot_time_init"
914 "(base=0x%x,system=0x%x)\n",
915 fs_base, system));
916 text_region_size = 0x10000000;
917 data_region_size = 0x10000000;
918 shared_file_init(&shared_text_region_handle,
919 text_region_size,
920 &shared_data_region_handle,
921 data_region_size,
922 &shared_file_mapping_array);
923
924 shared_region_mapping_create(shared_text_region_handle,
925 text_region_size,
926 shared_data_region_handle,
927 data_region_size,
928 shared_file_mapping_array,
929 GLOBAL_SHARED_TEXT_SEGMENT,
930 &new_system_region,
931 SHARED_ALTERNATE_LOAD_BASE,
932 SHARED_ALTERNATE_LOAD_BASE);
933
934 new_system_region->fs_base = fs_base;
935 new_system_region->system = system;
936 new_system_region->flags = SHARED_REGION_SYSTEM;
937
938 /* grab an extra reference for the caller */
939 /* remember to grab before call to update */
940 shared_region_mapping_ref(new_system_region);
941 old_default_env = update_default_shared_region(new_system_region);
942 /* hold an extra reference because these are the system */
943 /* shared regions. */
944 if(old_default_env)
945 shared_region_mapping_dealloc(old_default_env);
946 if(com_mapping_resource == NULL) {
947 shared_com_boot_time_init();
948 }
949 shared_region_mapping_ref(com_mapping_resource);
950 new_system_region->next = com_mapping_resource;
951 vm_set_shared_region(current_task(), new_system_region);
952 SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
953 fs_base, system));
954 }
955
956
957 /* called at boot time, allocates two regions, each 256 megs in size */
958 /* these regions are later mapped into task spaces, allowing them to */
959 /* share the contents of the regions. shared_file_init is part of */
960 /* a shared_memory_server which not only allocates the backing maps */
961 /* but also coordinates requests for space. */
962
963
964 static kern_return_t
965 shared_file_init(
966 ipc_port_t *text_region_handle,
967 vm_size_t text_region_size,
968 ipc_port_t *data_region_handle,
969 vm_size_t data_region_size,
970 vm_offset_t *file_mapping_array)
971 {
972 shared_file_info_t *sf_head;
973 vm_offset_t table_mapping_address;
974 int data_table_size;
975 int hash_size;
976 kern_return_t kret;
977
978 vm_object_t buf_object;
979 vm_map_entry_t entry;
980 vm_size_t alloced;
981 vm_offset_t b;
982 vm_page_t p;
983
984 SHARED_REGION_DEBUG(("shared_file_init()\n"));
985 /* create text and data maps/regions */
986 kret = shared_region_object_create(
987 text_region_size,
988 text_region_handle);
989 if (kret) {
990 return kret;
991 }
992 kret = shared_region_object_create(
993 data_region_size,
994 data_region_handle);
995 if (kret) {
996 ipc_port_release_send(*text_region_handle);
997 return kret;
998 }
999
1000 data_table_size = data_region_size >> 9;
1001 hash_size = data_region_size >> 14;
1002 table_mapping_address = data_region_size - data_table_size;
1003
1004 if(shared_file_mapping_array == 0) {
1005 vm_map_address_t map_addr;
1006 buf_object = vm_object_allocate(data_table_size);
1007
1008 if(vm_map_find_space(kernel_map, &map_addr,
1009 data_table_size, 0, &entry)
1010 != KERN_SUCCESS) {
1011 panic("shared_file_init: no space");
1012 }
1013 shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
1014 *file_mapping_array = shared_file_mapping_array;
1015 vm_map_unlock(kernel_map);
1016 entry->object.vm_object = buf_object;
1017 entry->offset = 0;
1018
1019 for (b = *file_mapping_array, alloced = 0;
1020 alloced < (hash_size +
1021 round_page(sizeof(struct sf_mapping)));
1022 alloced += PAGE_SIZE, b += PAGE_SIZE) {
1023 vm_object_lock(buf_object);
1024 p = vm_page_alloc(buf_object, alloced);
1025 if (p == VM_PAGE_NULL) {
1026 panic("shared_file_init: no space");
1027 }
1028 p->busy = FALSE;
1029 vm_object_unlock(buf_object);
1030 pmap_enter(kernel_pmap, b, p->phys_page,
1031 VM_PROT_READ | VM_PROT_WRITE,
1032 ((unsigned int)(p->object->wimg_bits))
1033 & VM_WIMG_MASK,
1034 TRUE);
1035 }
1036
1037
1038 /* initialize loaded file array */
1039 sf_head = (shared_file_info_t *)*file_mapping_array;
1040 sf_head->hash = (queue_head_t *)
1041 (((int)*file_mapping_array) +
1042 sizeof(struct shared_file_info));
1043 sf_head->hash_size = hash_size/sizeof(queue_head_t);
1044 mutex_init(&(sf_head->lock), 0);
1045 sf_head->hash_init = FALSE;
1046
1047
1048 mach_make_memory_entry(kernel_map, &data_table_size,
1049 *file_mapping_array, VM_PROT_READ, &sfma_handle,
1050 NULL);
1051
1052 if (vm_map_wire(kernel_map,
1053 vm_map_trunc_page(*file_mapping_array),
1054 vm_map_round_page(*file_mapping_array +
1055 hash_size +
1056 round_page(sizeof(struct sf_mapping))),
1057 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1058 panic("shared_file_init: No memory for data table");
1059 }
1060
1061 lsf_zone = zinit(sizeof(struct load_file_ele),
1062 data_table_size -
1063 (hash_size + round_page_32(sizeof(struct sf_mapping))),
1064 0, "load_file_server");
1065
1066 zone_change(lsf_zone, Z_EXHAUST, TRUE);
1067 zone_change(lsf_zone, Z_COLLECT, FALSE);
1068 zone_change(lsf_zone, Z_EXPAND, FALSE);
1069 zone_change(lsf_zone, Z_FOREIGN, TRUE);
1070
1071 /* initialize the global default environment lock */
1072 mutex_init(&default_regions_list_lock_data, 0);
1073
1074 } else {
1075 *file_mapping_array = shared_file_mapping_array;
1076 }
1077
1078 kret = vm_map(((vm_named_entry_t)
1079 (*data_region_handle)->ip_kobject)->backing.map,
1080 &table_mapping_address,
1081 data_table_size, 0,
1082 SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
1083 sfma_handle, 0, FALSE,
1084 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
1085
1086 SHARED_REGION_DEBUG(("shared_file_init() done\n"));
1087 return kret;
1088 }
1089
1090 static kern_return_t
1091 shared_file_header_init(
1092 shared_file_info_t *shared_file_header)
1093 {
1094 vm_size_t hash_table_size;
1095 vm_size_t hash_table_offset;
1096 int i;
1097 /* Wire the hash entry pool only as needed; since we are the only */
1098 /* users, we take a few liberties with the population of our */
1099 /* zone. */
1100 static int allocable_hash_pages;
1101 static vm_offset_t hash_cram_address;
1102
1103
1104 hash_table_size = shared_file_header->hash_size
1105 * sizeof (struct queue_entry);
1106 hash_table_offset = hash_table_size +
1107 round_page(sizeof (struct sf_mapping));
1108 for (i = 0; i < shared_file_header->hash_size; i++)
1109 queue_init(&shared_file_header->hash[i]);
1110
1111 allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
1112 / PAGE_SIZE);
1113 hash_cram_address = ((vm_offset_t) shared_file_header)
1114 + hash_table_offset;
1115 shared_file_available_hash_ele = 0;
1116
1117 shared_file_header->hash_init = TRUE;
1118
1119 if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
1120 int cram_pages, cram_size;
1121
1122 cram_pages = allocable_hash_pages > 3 ?
1123 3 : allocable_hash_pages;
1124 cram_size = cram_pages * PAGE_SIZE;
1125 if (vm_map_wire(kernel_map, hash_cram_address,
1126 hash_cram_address + cram_size,
1127 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1128 printf("shared_file_header_init: "
1129 "No memory for data table\n");
1130 return KERN_NO_SPACE;
1131 }
1132 allocable_hash_pages -= cram_pages;
1133 zcram(lsf_zone, (void *) hash_cram_address, cram_size);
1134 shared_file_available_hash_ele
1135 += cram_size/sizeof(struct load_file_ele);
1136 hash_cram_address += cram_size;
1137 }
1138
1139 return KERN_SUCCESS;
1140 }
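/*
 * Layout note: the shared file header, its array of hash buckets and the
 * backing store for lsf_zone all live in the wired "file mapping array"
 * set up in shared_file_init().  shared_file_header_init() initializes the
 * bucket queues and then wires and crams a few pages at a time into
 * lsf_zone; the load-file entries allocated elsewhere in this module
 * consume those elements, and shared_file_available_hash_ele tracks how
 * many are left.
 */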
1141
1142
1143 /*
1144 * map_shared_file:
1145 *
1146 * Attempt to map a split library into the shared region. Check if the mappings
1147 * are already in place.
1148 */
1149 kern_return_t
1150 map_shared_file(
1151 int map_cnt,
1152 struct shared_file_mapping_np *mappings,
1153 memory_object_control_t file_control,
1154 memory_object_size_t file_size,
1155 shared_region_task_mappings_t sm_info,
1156 mach_vm_offset_t base_offset,
1157 mach_vm_offset_t *slide_p)
1158 {
1159 vm_object_t file_object;
1160 shared_file_info_t *shared_file_header;
1161 load_struct_t *file_entry;
1162 loaded_mapping_t *file_mapping;
1163 int i;
1164 kern_return_t ret;
1165 mach_vm_offset_t slide;
1166
1167 SHARED_REGION_DEBUG(("map_shared_file()\n"));
1168
1169 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1170
1171 mutex_lock(&shared_file_header->lock);
1172
1173 /* If this is the first call to this routine, take the opportunity */
1174 /* to initialize the hash table which will be used to look-up */
1175 /* mappings based on the file object */
1176
1177 if(shared_file_header->hash_init == FALSE) {
1178 ret = shared_file_header_init(shared_file_header);
1179 if (ret != KERN_SUCCESS) {
1180 mutex_unlock(&shared_file_header->lock);
1181 return KERN_NO_SPACE;
1182 }
1183 }
1184
1185
1186 /* Find the entry in the map associated with the current mapping */
1187 /* of the file object */
1188 file_object = memory_object_control_to_vm_object(file_control);
1189
1190 file_entry = lsf_hash_lookup(shared_file_header->hash,
1191 (void *) file_object,
1192 mappings[0].sfm_file_offset,
1193 shared_file_header->hash_size,
1194 TRUE, TRUE, sm_info);
1195 if (file_entry) {
1196 /* File is loaded, check the load manifest for exact match */
1197 /* we simplify by requiring that the elements be the same */
1198 /* size and in the same order rather than checking for */
1199 /* semantic equivalence. */
1200
1201 i = 0;
1202 file_mapping = file_entry->mappings;
1203 while(file_mapping != NULL) {
1204 if(i>=map_cnt) {
1205 mutex_unlock(&shared_file_header->lock);
1206 return KERN_INVALID_ARGUMENT;
1207 }
1208 if(((mappings[i].sfm_address)
1209 & SHARED_DATA_REGION_MASK) !=
1210 file_mapping->mapping_offset ||
1211 mappings[i].sfm_size != file_mapping->size ||
1212 mappings[i].sfm_file_offset != file_mapping->file_offset ||
1213 mappings[i].sfm_init_prot != file_mapping->protection) {
1214 break;
1215 }
1216 file_mapping = file_mapping->next;
1217 i++;
1218 }
1219 if(i!=map_cnt) {
1220 mutex_unlock(&shared_file_header->lock);
1221 return KERN_INVALID_ARGUMENT;
1222 }
1223
1224 slide = file_entry->base_address - base_offset;
1225 if (slide_p != NULL) {
1226 /*
1227 * File already mapped but at different address,
1228 * and the caller is OK with the sliding.
1229 */
1230 *slide_p = slide;
1231 ret = KERN_SUCCESS;
1232 } else {
1233 /*
1234 * The caller doesn't want any sliding. The file needs
1235 * to be mapped at the requested address or not mapped.
1236 */
1237 if (slide != 0) {
1238 /*
1239 * The file is already mapped but at a different
1240 * address.
1241 * We fail.
1242 * XXX should we attempt to load at
1243 * requested address too ?
1244 */
1245 ret = KERN_FAILURE;
1246 } else {
1247 /*
1248 * The file is already mapped at the correct
1249 * address.
1250 * We're done !
1251 */
1252 ret = KERN_SUCCESS;
1253 }
1254 }
1255 mutex_unlock(&shared_file_header->lock);
1256 return ret;
1257 } else {
1258 /* File is not loaded, let's attempt to load it */
1259 ret = lsf_map(mappings, map_cnt,
1260 (void *)file_control,
1261 file_size,
1262 sm_info,
1263 base_offset,
1264 slide_p);
1265 if(ret == KERN_NO_SPACE) {
1266 shared_region_mapping_t regions;
1267 shared_region_mapping_t system_region;
1268 regions = (shared_region_mapping_t)sm_info->self;
1269 regions->flags |= SHARED_REGION_FULL;
1270 system_region = lookup_default_shared_region(
1271 regions->fs_base, regions->system);
1272 if (system_region == regions) {
1273 shared_region_mapping_t new_system_shared_region;
1274 shared_file_boot_time_init(
1275 regions->fs_base, regions->system);
1276 /* current task must stay with its current */
1277 /* regions, drop count on system_shared_region */
1278 /* and put back our original set */
1279 vm_get_shared_region(current_task(),
1280 &new_system_shared_region);
1281 shared_region_mapping_dealloc_lock(
1282 new_system_shared_region, 0, 1);
1283 vm_set_shared_region(current_task(), regions);
1284 } else if (system_region != NULL) {
1285 shared_region_mapping_dealloc_lock(
1286 system_region, 0, 1);
1287 }
1288 }
1289 mutex_unlock(&shared_file_header->lock);
1290 return ret;
1291 }
1292 }
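/*
 * Illustrative caller sketch for map_shared_file(), with hypothetical
 * values (real callers build the mapping array from a Mach-O's load
 * commands and supply the file's file_control/file_size):
 *
 *	struct shared_file_mapping_np m;
 *	mach_vm_offset_t slide;
 *	kern_return_t kr;
 *
 *	m.sfm_address = sm_info->client_base;
 *	m.sfm_size = PAGE_SIZE;
 *	m.sfm_file_offset = 0;
 *	m.sfm_init_prot = VM_PROT_READ | VM_PROT_EXECUTE;
 *
 *	kr = map_shared_file(1, &m, file_control, file_size,
 *			     sm_info, 0, &slide);
 *
 * Passing a non-NULL "slide_p" tells map_shared_file() that the caller can
 * tolerate the file being placed at an address other than the one
 * requested; passing NULL demands an exact placement.
 */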
1293
1294 /*
1295 * shared_region_cleanup:
1296 *
1297 * Deallocates all the mappings in the shared region, except those explicitly
1298 * specified in the "ranges" set of address ranges.
1299 */
1300 kern_return_t
1301 shared_region_cleanup(
1302 unsigned int range_count,
1303 struct shared_region_range_np *ranges,
1304 shared_region_task_mappings_t sm_info)
1305 {
1306 kern_return_t kr;
1307 ipc_port_t region_handle;
1308 vm_named_entry_t region_named_entry;
1309 vm_map_t text_submap, data_submap, submap, next_submap;
1310 unsigned int i_range;
1311 vm_map_offset_t range_start, range_end;
1312 vm_map_offset_t submap_base, submap_end, submap_offset;
1313 vm_map_size_t delete_size;
1314
1315 struct shared_region_range_np tmp_range;
1316 unsigned int sort_index, sorted_index;
1317 vm_map_offset_t sort_min_address;
1318 unsigned int sort_min_index;
1319
1320 /*
1321 * Since we want to deallocate the holes between the "ranges",
1322 * sort the array by increasing addresses.
1323 */
1324 for (sorted_index = 0;
1325 sorted_index < range_count;
1326 sorted_index++) {
1327
1328 /* first remaining entry is our new starting point */
1329 sort_min_index = sorted_index;
1330 sort_min_address = ranges[sort_min_index].srr_address;
1331
1332 /* find the lowest mapping_offset in the remaining entries */
1333 for (sort_index = sorted_index + 1;
1334 sort_index < range_count;
1335 sort_index++) {
1336 if (ranges[sort_index].srr_address < sort_min_address) {
1337 /* lowest address so far... */
1338 sort_min_index = sort_index;
1339 sort_min_address =
1340 ranges[sort_min_index].srr_address;
1341 }
1342 }
1343
1344 if (sort_min_index != sorted_index) {
1345 /* swap entries */
1346 tmp_range = ranges[sort_min_index];
1347 ranges[sort_min_index] = ranges[sorted_index];
1348 ranges[sorted_index] = tmp_range;
1349 }
1350 }
1351
1352 region_handle = (ipc_port_t) sm_info->text_region;
1353 region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
1354 text_submap = region_named_entry->backing.map;
1355
1356 region_handle = (ipc_port_t) sm_info->data_region;
1357 region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
1358 data_submap = region_named_entry->backing.map;
1359
1360 submap = text_submap;
1361 next_submap = submap;
1362 submap_base = sm_info->client_base;
1363 submap_offset = 0;
1364 submap_end = submap_base + sm_info->text_size;
1365 for (i_range = 0;
1366 i_range < range_count;
1367 i_range++) {
1368
1369 /* get the next range of addresses to keep */
1370 range_start = ranges[i_range].srr_address;
1371 range_end = range_start + ranges[i_range].srr_size;
1372 /* align them to page boundaries */
1373 range_start = vm_map_trunc_page(range_start);
1374 range_end = vm_map_round_page(range_end);
1375
1376 /* make sure we don't go beyond the submap's boundaries */
1377 if (range_start < submap_base) {
1378 range_start = submap_base;
1379 } else if (range_start >= submap_end) {
1380 range_start = submap_end;
1381 }
1382 if (range_end < submap_base) {
1383 range_end = submap_base;
1384 } else if (range_end >= submap_end) {
1385 range_end = submap_end;
1386 }
1387
1388 if (range_start > submap_base + submap_offset) {
1389 /*
1390 * Deallocate everything between the last offset in the
1391 * submap and the start of this range.
1392 */
1393 delete_size = range_start -
1394 (submap_base + submap_offset);
1395 (void) vm_deallocate(submap,
1396 submap_offset,
1397 delete_size);
1398 } else {
1399 delete_size = 0;
1400 }
1401
1402 /* skip to the end of the range */
1403 submap_offset += delete_size + (range_end - range_start);
1404
1405 if (submap_base + submap_offset >= submap_end) {
1406 /* get to next submap */
1407
1408 if (submap == data_submap) {
1409 /* no other submap after data: done ! */
1410 break;
1411 }
1412
1413 /* get original range again */
1414 range_start = ranges[i_range].srr_address;
1415 range_end = range_start + ranges[i_range].srr_size;
1416 range_start = vm_map_trunc_page(range_start);
1417 range_end = vm_map_round_page(range_end);
1418
1419 if (range_end > submap_end) {
1420 /*
1421 * This last range overlaps with the next
1422 * submap. We need to process it again
1423 * after switching submaps. Otherwise, we'll
1424 * just continue with the next range.
1425 */
1426 i_range--;
1427 }
1428
1429 if (submap == text_submap) {
1430 /*
1431 * Switch to the data submap.
1432 */
1433 submap = data_submap;
1434 submap_offset = 0;
1435 submap_base = sm_info->client_base +
1436 sm_info->text_size;
1437 submap_end = submap_base + sm_info->data_size;
1438 }
1439 }
1440 }
1441
1442 if (submap_base + submap_offset < submap_end) {
1443 /* delete remainder of this submap, from "offset" to the end */
1444 (void) vm_deallocate(submap,
1445 submap_offset,
1446 submap_end - submap_base - submap_offset);
1447 /* if nothing to keep in data submap, delete it all */
1448 if (submap == text_submap) {
1449 submap = data_submap;
1450 submap_offset = 0;
1451 submap_base = sm_info->client_base + sm_info->text_size;
1452 submap_end = submap_base + sm_info->data_size;
1453 (void) vm_deallocate(data_submap,
1454 0,
1455 submap_end - submap_base);
1456 }
1457 }
1458
1459 kr = KERN_SUCCESS;
1460 return kr;
1461 }
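/*
 * Illustrative sketch of a shared_region_cleanup() call (hypothetical
 * values): keep one range of mappings and deallocate everything else.
 *
 *	struct shared_region_range_np keep;
 *
 *	keep.srr_address = sm_info->client_base;
 *	keep.srr_size = 0x1000;
 *	(void) shared_region_cleanup(1, &keep, sm_info);
 *
 * Note that the "ranges" array may be reordered in place, since the
 * routine sorts it by address before walking the text and data submaps.
 */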
1462
1463 /* A hash lookup function for the list of loaded files in */
1464 /* shared_memory_server space. */
1465
1466 static load_struct_t *
1467 lsf_hash_lookup(
1468 queue_head_t *hash_table,
1469 void *file_object,
1470 vm_offset_t recognizableOffset,
1471 int size,
1472 boolean_t regular,
1473 boolean_t alternate,
1474 shared_region_task_mappings_t sm_info)
1475 {
1476 register queue_t bucket;
1477 load_struct_t *entry;
1478 shared_region_mapping_t target_region;
1479 int depth;
1480
1481 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1482 "reg=%d alt=%d sm_info=%p\n",
1483 hash_table, file_object, recognizableOffset, size,
1484 regular, alternate, sm_info));
1485
1486 bucket = &(hash_table[load_file_hash((int)file_object, size)]);
1487 for (entry = (load_struct_t *)queue_first(bucket);
1488 !queue_end(bucket, &entry->links);
1489 entry = (load_struct_t *)queue_next(&entry->links)) {
1490
1491 if ((entry->file_object == (int)file_object) &&
1492 (entry->file_offset == recognizableOffset)) {
1493 target_region = (shared_region_mapping_t)sm_info->self;
1494 depth = target_region->depth;
1495 while(target_region) {
1496 if((!(sm_info->self)) ||
1497 ((target_region == entry->regions_instance) &&
1498 (target_region->depth >= entry->depth))) {
1499 if(alternate &&
1500 entry->base_address >= sm_info->alternate_base) {
1501 LSF_DEBUG(("lsf_hash_lookup: "
1502 "alt=%d found entry %p "
1503 "(base=0x%x "
1504 "alt_base=0x%x)\n",
1505 alternate, entry,
1506 entry->base_address,
1507 sm_info->alternate_base));
1508 return entry;
1509 }
1510 if (regular &&
1511 entry->base_address < sm_info->alternate_base) {
1512 LSF_DEBUG(("lsf_hash_lookup: "
1513 "reg=%d found entry %p "
1514 "(base=0x%x "
1515 "alt_base=0x%x)\n",
1516 regular, entry,
1517 entry->base_address,
1518 sm_info->alternate_base));
1519 return entry;
1520 }
1521 }
1522 if(target_region->object_chain) {
1523 target_region = (shared_region_mapping_t)
1524 target_region->object_chain->object_chain_region;
1525 depth = target_region->object_chain->depth;
1526 } else {
1527 target_region = NULL;
1528 }
1529 }
1530 }
1531 }
1532
1533 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1534 "reg=%d alt=%d sm_info=%p NOT FOUND\n",
1535 hash_table, file_object, recognizableOffset, size,
1536 regular, alternate, sm_info));
1537 return (load_struct_t *)0;
1538 }
1539
1540 __private_extern__ load_struct_t *
1541 lsf_remove_regions_mappings_lock(
1542 shared_region_mapping_t region,
1543 shared_region_task_mappings_t sm_info,
1544 int need_sfh_lock)
1545 {
1546 int i;
1547 register queue_t bucket;
1548 shared_file_info_t *shared_file_header;
1549 load_struct_t *entry;
1550 load_struct_t *next_entry;
1551
1552 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1553
1554 LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
1555 "sfh=%p\n",
1556 region, sm_info, shared_file_header));
1557 if (need_sfh_lock)
1558 mutex_lock(&shared_file_header->lock);
1559 if(shared_file_header->hash_init == FALSE) {
1560 if (need_sfh_lock)
1561 mutex_unlock(&shared_file_header->lock);
1562 LSF_DEBUG(("lsf_remove_regions_mappings_lock"
1563 "(region=%p,sm_info=%p): not inited\n",
1564 region, sm_info));
1565 return NULL;
1566 }
1567 for(i = 0; i<shared_file_header->hash_size; i++) {
1568 bucket = &shared_file_header->hash[i];
1569 for (entry = (load_struct_t *)queue_first(bucket);
1570 !queue_end(bucket, &entry->links);) {
1571 next_entry = (load_struct_t *)queue_next(&entry->links);
1572 if(region == entry->regions_instance) {
1573 LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
1574 "entry %p region %p: "
1575 "unloading\n",
1576 entry, region));
1577 lsf_unload((void *)entry->file_object,
1578 entry->base_address, sm_info);
1579 } else {
1580 LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
1581 "entry %p region %p target region %p: "
1582 "not unloading\n",
1583 entry, entry->regions_instance, region));
1584 }
1585
1586 entry = next_entry;
1587 }
1588 }
1589 if (need_sfh_lock)
1590 mutex_unlock(&shared_file_header->lock);
1591 LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));
1592
1593 return NULL; /* XXX */
1594 }
1595
1596 /*
1597 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
1598 * only caller. Remove this stub function and the corresponding symbol
1599 * export for Merlot.
1600 */
1601 load_struct_t *
1602 lsf_remove_regions_mappings(
1603 shared_region_mapping_t region,
1604 shared_region_task_mappings_t sm_info)
1605 {
1606 return lsf_remove_regions_mappings_lock(region, sm_info, 1);
1607 }
1608
1609 /* Removes a map_list (list of loaded extents) for a file from */
1610 /* the loaded file hash table. */
1611
1612 static load_struct_t *
1613 lsf_hash_delete(
1614 void *file_object,
1615 vm_offset_t base_offset,
1616 shared_region_task_mappings_t sm_info)
1617 {
1618 register queue_t bucket;
1619 shared_file_info_t *shared_file_header;
1620 load_struct_t *entry;
1621
1622 LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
1623 file_object, base_offset, sm_info));
1624
1625 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1626
1627 bucket = &shared_file_header->hash
1628 [load_file_hash((int)file_object, shared_file_header->hash_size)];
1629
1630 for (entry = (load_struct_t *)queue_first(bucket);
1631 !queue_end(bucket, &entry->links);
1632 entry = (load_struct_t *)queue_next(&entry->links)) {
1633 if((!(sm_info->self)) || ((shared_region_mapping_t)
1634 sm_info->self == entry->regions_instance)) {
1635 if ((entry->file_object == (int) file_object) &&
1636 (entry->base_address == base_offset)) {
1637 queue_remove(bucket, entry,
1638 load_struct_ptr_t, links);
1639 LSF_DEBUG(("lsf_hash_delete: found it\n"));
1640 return entry;
1641 }
1642 }
1643 }
1644
1645 LSF_DEBUG(("lsf_hash_delete: not found\n"));
1646 return (load_struct_t *)0;
1647 }
1648
1649 /* Inserts a new map_list (list of loaded file extents) into the */
1650 /* server loaded file hash table. */
1651
1652 static void
1653 lsf_hash_insert(
1654 load_struct_t *entry,
1655 shared_region_task_mappings_t sm_info)
1656 {
1657 shared_file_info_t *shared_file_header;
1658
1659 LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
1660 entry, sm_info, entry->file_object, entry->base_address));
1661
1662 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1663 queue_enter(&shared_file_header->hash
1664 [load_file_hash(entry->file_object,
1665 shared_file_header->hash_size)],
1666 entry, load_struct_ptr_t, links);
1667 }
1668
1669
1670
1671 /*
1672 * lsf_slide:
1673 *
1674 * Look in the shared region, starting from the end, for a place to fit all the
1675 * mappings while respecting their relative offsets.
1676 */
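/*
 * In more detail: the routine works on a sorted copy of the mappings (the
 * original order matters to callers, so it is not modified), starts with
 * the last mapping aligned to the end of the submaps, and then walks the
 * text/data submaps backwards.  Whenever a candidate spot is already
 * occupied it "wiggles" the whole set of mappings further down, within the
 * wiggle room still available, restarting the scan if the slide invalidates
 * positions already checked.
 */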
1677 static kern_return_t
1678 lsf_slide(
1679 unsigned int map_cnt,
1680 struct shared_file_mapping_np *mappings_in,
1681 shared_region_task_mappings_t sm_info,
1682 mach_vm_offset_t *base_offset_p)
1683 {
1684 mach_vm_offset_t max_mapping_offset;
1685 int i;
1686 vm_map_entry_t map_entry, prev_entry, next_entry;
1687 mach_vm_offset_t prev_hole_start, prev_hole_end;
1688 mach_vm_offset_t mapping_offset, mapping_end_offset;
1689 mach_vm_offset_t base_offset;
1690 mach_vm_size_t mapping_size;
1691 mach_vm_offset_t wiggle_room, wiggle;
1692 vm_map_t text_map, data_map, map;
1693 vm_named_entry_t region_entry;
1694 ipc_port_t region_handle;
1695 kern_return_t kr;
1696
1697 struct shared_file_mapping_np *mappings, tmp_mapping;
1698 unsigned int sort_index, sorted_index;
1699 vm_map_offset_t sort_min_address;
1700 unsigned int sort_min_index;
1701
1702 /*
1703 * Sort the mappings array, so that we can try and fit them in
1704 * in the right order as we progress along the VM maps.
1705 *
1706 * We can't modify the original array (the original order is
1707 * important when doing lookups of the mappings), so copy it first.
1708 */
1709
1710 kr = kmem_alloc(kernel_map,
1711 (vm_offset_t *) &mappings,
1712 (vm_size_t) (map_cnt * sizeof (mappings[0])));
1713 if (kr != KERN_SUCCESS) {
1714 return KERN_NO_SPACE;
1715 }
1716
1717 bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));
1718
1719 max_mapping_offset = 0;
1720 for (sorted_index = 0;
1721 sorted_index < map_cnt;
1722 sorted_index++) {
1723
1724 /* first remaining entry is our new starting point */
1725 sort_min_index = sorted_index;
1726 mapping_end_offset = ((mappings[sort_min_index].sfm_address &
1727 SHARED_TEXT_REGION_MASK) +
1728 mappings[sort_min_index].sfm_size);
1729 sort_min_address = mapping_end_offset;
1730 /* compute the highest mapping_offset as well... */
1731 if (mapping_end_offset > max_mapping_offset) {
1732 max_mapping_offset = mapping_end_offset;
1733 }
1734 /* find the lowest mapping_offset in the remaining entries */
1735 for (sort_index = sorted_index + 1;
1736 sort_index < map_cnt;
1737 sort_index++) {
1738
1739 mapping_end_offset =
1740 ((mappings[sort_index].sfm_address &
1741 SHARED_TEXT_REGION_MASK) +
1742 mappings[sort_index].sfm_size);
1743
1744 if (mapping_end_offset < sort_min_address) {
1745 /* lowest mapping_offset so far... */
1746 sort_min_index = sort_index;
1747 sort_min_address = mapping_end_offset;
1748 }
1749 }
1750 if (sort_min_index != sorted_index) {
1751 /* swap entries */
1752 tmp_mapping = mappings[sort_min_index];
1753 mappings[sort_min_index] = mappings[sorted_index];
1754 mappings[sorted_index] = tmp_mapping;
1755 }
1756
1757 }
1758
1759 max_mapping_offset = vm_map_round_page(max_mapping_offset);
1760
1761 /* start from the end of the shared area */
1762 base_offset = sm_info->text_size;
1763
1764 /* can all the mappings fit ? */
1765 if (max_mapping_offset > base_offset) {
1766 kmem_free(kernel_map,
1767 (vm_offset_t) mappings,
1768 map_cnt * sizeof (mappings[0]));
1769 return KERN_FAILURE;
1770 }
1771
1772 /*
1773 * Align the last mapping to the end of the submaps
1774 * and start from there.
1775 */
1776 base_offset -= max_mapping_offset;
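/*
 * base_offset is now the highest base at which the mapping with the
 * largest (page-rounded) end offset still fits entirely below text_size.
 */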
1777
1778 region_handle = (ipc_port_t) sm_info->text_region;
1779 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
1780 text_map = region_entry->backing.map;
1781
1782 region_handle = (ipc_port_t) sm_info->data_region;
1783 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
1784 data_map = region_entry->backing.map;
1785
1786 vm_map_lock_read(text_map);
1787 vm_map_lock_read(data_map);
1788
1789 start_over:
1790 /*
1791 * At first, we can wiggle all the way from our starting point
1792 * (base_offset) towards the start of the map (0), if needed.
1793 */
1794 wiggle_room = base_offset;
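/*
 * wiggle_room is how much further base_offset can still be slid toward 0
 * without invalidating the holes already chosen for the mappings placed
 * earlier in this pass.  Mappings are visited from highest end offset to
 * lowest; if one of them needs a bigger slide than wiggle_room allows, we
 * go back to start_over and redo the placement at the reduced base_offset.
 */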
1795
1796 for (i = (signed) map_cnt - 1; i >= 0; i--) {
1797 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
1798 /* copy-on-write mappings are in the data submap */
1799 map = data_map;
1800 } else {
1801 /* other mappings are in the text submap */
1802 map = text_map;
1803 }
1804 /* get the offset within the appropriate submap */
1805 mapping_offset = (mappings[i].sfm_address &
1806 SHARED_TEXT_REGION_MASK);
1807 mapping_size = mappings[i].sfm_size;
1808 mapping_end_offset = mapping_offset + mapping_size;
1809 mapping_offset = vm_map_trunc_page(mapping_offset);
1810 mapping_end_offset = vm_map_round_page(mapping_end_offset);
1811 mapping_size = mapping_end_offset - mapping_offset;
1812
1813 for (;;) {
1814 if (vm_map_lookup_entry(map,
1815 base_offset + mapping_offset,
1816 &map_entry)) {
1817 /*
1818 * The start address for that mapping
1819 * is already mapped: no fit.
1820 * Locate the hole immediately before this map
1821 * entry.
1822 */
1823 prev_hole_end = map_entry->vme_start;
1824 prev_entry = map_entry->vme_prev;
1825 if (prev_entry == vm_map_to_entry(map)) {
1826 /* no previous entry */
1827 prev_hole_start = map->min_offset;
1828 } else {
1829 /* previous entry ends here */
1830 prev_hole_start = prev_entry->vme_end;
1831 }
1832 } else {
1833 /*
1834 * The start address for that mapping is not
1835 * mapped.
1836 * Locate the start and end of the hole
1837 * at that location.
1838 */
1839 /* map_entry is the previous entry */
1840 if (map_entry == vm_map_to_entry(map)) {
1841 /* no previous entry */
1842 prev_hole_start = map->min_offset;
1843 } else {
1844 /* previous entry ends there */
1845 prev_hole_start = map_entry->vme_end;
1846 }
1847 next_entry = map_entry->vme_next;
1848 if (next_entry == vm_map_to_entry(map)) {
1849 /* no next entry */
1850 prev_hole_end = map->max_offset;
1851 } else {
1852 prev_hole_end = next_entry->vme_start;
1853 }
1854 }
1855
1856 if (prev_hole_end <= base_offset + mapping_offset) {
1857 /* hole is to our left: try and wiggle to fit */
1858 wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
1859 if (wiggle > base_offset) {
1860 /* we're getting out of the map */
1861 kr = KERN_FAILURE;
1862 goto done;
1863 }
1864 base_offset -= wiggle;
1865 if (wiggle > wiggle_room) {
1866 /* can't wiggle that much: start over */
1867 goto start_over;
1868 }
1869 /* account for the wiggling done */
1870 wiggle_room -= wiggle;
1871 }
1872
1873 if (prev_hole_end >
1874 base_offset + mapping_offset + mapping_size) {
1875 /*
1876 * The hole extends further to the right
1877 * than what we need. Ignore the extra space.
1878 */
1879 prev_hole_end = (base_offset + mapping_offset +
1880 mapping_size);
1881 }
1882
1883 if (prev_hole_end <
1884 base_offset + mapping_offset + mapping_size) {
1885 /*
1886 * The hole is not big enough to establish
1887 * the mapping right there: wiggle towards
1888 * the beginning of the hole so that the end
1889 * of our mapping fits in the hole...
1890 */
1891 wiggle = base_offset + mapping_offset
1892 + mapping_size - prev_hole_end;
1893 if (wiggle > base_offset) {
1894 /* we're getting out of the map */
1895 kr = KERN_FAILURE;
1896 goto done;
1897 }
1898 base_offset -= wiggle;
1899 if (wiggle > wiggle_room) {
1900 /* can't wiggle that much: start over */
1901 goto start_over;
1902 }
1903 /* account for the wiggling done */
1904 wiggle_room -= wiggle;
1905
1906 /* keep searching from this new base */
1907 continue;
1908 }
1909
1910 if (prev_hole_start > base_offset + mapping_offset) {
1911 /* no hole found: keep looking */
1912 continue;
1913 }
1914
1915 /* compute wiggling room at this hole */
1916 wiggle = base_offset + mapping_offset - prev_hole_start;
1917 if (wiggle < wiggle_room) {
1918 /* less wiggle room than before... */
1919 wiggle_room = wiggle;
1920 }
1921
1922 /* found a hole that fits: skip to next mapping */
1923 break;
1924 } /* while we look for a hole */
1925 } /* for each mapping */
1926
1927 *base_offset_p = base_offset;
1928 kr = KERN_SUCCESS;
1929
1930 done:
1931 vm_map_unlock_read(text_map);
1932 vm_map_unlock_read(data_map);
1933
1934 kmem_free(kernel_map,
1935 (vm_offset_t) mappings,
1936 map_cnt * sizeof (mappings[0]));
1937
1938 return kr;
1939 }
1940
1941 /*
1942 * lsf_map:
1943 *
1944 * Attempt to establish the mappings of a split library in the shared region.
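 *
 * The file is described by a load_struct_t inserted into the region's hash
 * table; each requested mapping is then established in the text or data
 * submap with mach_vm_map().  If a mapping cannot be placed and the caller
 * allows sliding (slide_p != NULL), lsf_slide() picks a new base offset and
 * the whole set of mappings is retried.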
1945 */
1946 static kern_return_t
1947 lsf_map(
1948 struct shared_file_mapping_np *mappings,
1949 int map_cnt,
1950 void *file_control,
1951 memory_object_offset_t file_size,
1952 shared_region_task_mappings_t sm_info,
1953 mach_vm_offset_t base_offset,
1954 mach_vm_offset_t *slide_p)
1955 {
1956 load_struct_t *entry;
1957 loaded_mapping_t *file_mapping;
1958 loaded_mapping_t **tptr;
1959 ipc_port_t region_handle;
1960 vm_named_entry_t region_entry;
1961 mach_port_t map_port;
1962 vm_object_t file_object;
1963 kern_return_t kr;
1964 int i;
1965 mach_vm_offset_t original_base_offset;
1966
1967 /* get the VM object from the file's memory object handle */
1968 file_object = memory_object_control_to_vm_object(file_control);
1969
1970 original_base_offset = base_offset;
1971
1972 LSF_DEBUG(("lsf_map"
1973 "(cnt=%d,file=%p,sm_info=%p)"
1974 "\n",
1975 map_cnt, file_object,
1976 sm_info));
1977
1978 restart_after_slide:
1979 /* get a new "load_struct_t" to describe the mappings for that file */
1980 entry = (load_struct_t *)zalloc(lsf_zone);
1981 LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
1982 LSF_DEBUG(("lsf_map"
1983 "(cnt=%d,file=%p,sm_info=%p) "
1984 "entry=%p\n",
1985 map_cnt, file_object,
1986 sm_info, entry));
1987 if (entry == NULL) {
1988 printf("lsf_map: unable to allocate memory\n");
1989 return KERN_NO_SPACE;
1990 }
1991 shared_file_available_hash_ele--;
1992 entry->file_object = (int)file_object;
1993 entry->mapping_cnt = map_cnt;
1994 entry->mappings = NULL;
1995 entry->links.prev = (queue_entry_t) 0;
1996 entry->links.next = (queue_entry_t) 0;
1997 entry->regions_instance = (shared_region_mapping_t)sm_info->self;
1998 entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
1999 entry->file_offset = mappings[0].sfm_file_offset;
2000
2001 /* insert the new file entry in the hash table, for later lookups */
2002 lsf_hash_insert(entry, sm_info);
2003
2004 /* where we should add the next mapping description for that file */
2005 tptr = &(entry->mappings);
2006
2007 entry->base_address = base_offset;
2008
2009
2010 /* establish each requested mapping */
2011 for (i = 0; i < map_cnt; i++) {
2012 mach_vm_offset_t target_address;
2013 mach_vm_offset_t region_mask;
2014
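/*
 * Copy-on-write mappings go in the data submap, all others in the text
 * submap.  In both cases, the first and last byte of the mapping (after
 * the base offset is applied) must fall within that submap's segment of
 * the shared area; otherwise the file is unloaded and the request rejected.
 */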
2015 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
2016 region_handle = (ipc_port_t)sm_info->data_region;
2017 region_mask = SHARED_DATA_REGION_MASK;
2018 if ((((mappings[i].sfm_address + base_offset)
2019 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
2020 (((mappings[i].sfm_address + base_offset +
2021 mappings[i].sfm_size - 1)
2022 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
2023 lsf_unload(file_object,
2024 entry->base_address, sm_info);
2025 return KERN_INVALID_ARGUMENT;
2026 }
2027 } else {
2028 region_mask = SHARED_TEXT_REGION_MASK;
2029 region_handle = (ipc_port_t)sm_info->text_region;
2030 if (((mappings[i].sfm_address + base_offset)
2031 & GLOBAL_SHARED_SEGMENT_MASK) ||
2032 ((mappings[i].sfm_address + base_offset +
2033 mappings[i].sfm_size - 1)
2034 & GLOBAL_SHARED_SEGMENT_MASK)) {
2035 lsf_unload(file_object,
2036 entry->base_address, sm_info);
2037 return KERN_INVALID_ARGUMENT;
2038 }
2039 }
2040 if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
2041 ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
2042 (file_size))) {
2043 lsf_unload(file_object, entry->base_address, sm_info);
2044 return KERN_INVALID_ARGUMENT;
2045 }
2046 target_address = entry->base_address +
2047 ((mappings[i].sfm_address) & region_mask);
2048 if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
2049 map_port = MACH_PORT_NULL;
2050 } else {
2051 map_port = (ipc_port_t) file_object->pager;
2052 }
2053 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2054
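/*
 * Zero-fill mappings are backed by anonymous memory (MACH_PORT_NULL);
 * all others map the file's pager at sfm_file_offset.  The mapping is
 * established at a fixed address within the submap, and only the
 * read/execute bits of the requested protections are applied at this level.
 */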
2055 if (mach_vm_map(region_entry->backing.map,
2056 &target_address,
2057 vm_map_round_page(mappings[i].sfm_size),
2058 0,
2059 VM_FLAGS_FIXED,
2060 map_port,
2061 mappings[i].sfm_file_offset,
2062 TRUE,
2063 (mappings[i].sfm_init_prot &
2064 (VM_PROT_READ|VM_PROT_EXECUTE)),
2065 (mappings[i].sfm_max_prot &
2066 (VM_PROT_READ|VM_PROT_EXECUTE)),
2067 VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
2068 lsf_unload(file_object, entry->base_address, sm_info);
2069
2070 if (slide_p != NULL) {
2071 /*
2072 * Requested mapping failed but the caller
2073 * is OK with sliding the library in the
2074 * shared region, so let's try and slide it...
2075 */
2076
2077 /* lookup an appropriate spot */
2078 kr = lsf_slide(map_cnt, mappings,
2079 sm_info, &base_offset);
2080 if (kr == KERN_SUCCESS) {
2081 /* try and map it there ... */
2082 entry->base_address = base_offset;
2083 goto restart_after_slide;
2084 }
2085 /* couldn't slide ... */
2086 }
2087
2088 return KERN_FAILURE;
2089 }
2090
2091 /* record this mapping */
2092 file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
2093 if (file_mapping == NULL) {
2094 lsf_unload(file_object, entry->base_address, sm_info);
2095 printf("lsf_map: unable to allocate memory\n");
2096 return KERN_NO_SPACE;
2097 }
2098 shared_file_available_hash_ele--;
2099 file_mapping->mapping_offset = (mappings[i].sfm_address)
2100 & region_mask;
2101 file_mapping->size = mappings[i].sfm_size;
2102 file_mapping->file_offset = mappings[i].sfm_file_offset;
2103 file_mapping->protection = mappings[i].sfm_init_prot;
2104 file_mapping->next = NULL;
2105 LSF_DEBUG(("lsf_map: file_mapping %p "
2106 "for offset=0x%x size=0x%x\n",
2107 file_mapping, file_mapping->mapping_offset,
2108 file_mapping->size));
2109
2110 /* and link it to the file entry */
2111 *tptr = file_mapping;
2112
2113 /* where to put the next mapping's description */
2114 tptr = &(file_mapping->next);
2115 }
2116
2117 if (slide_p != NULL) {
2118 *slide_p = base_offset - original_base_offset;
2119 }
2120
2121 if (sm_info->flags & SHARED_REGION_STANDALONE) {
2122 /*
2123 * We have a standalone and private shared region, so we
2124 * don't really need to keep the information about each file
2125 * and each mapping. Just deallocate it all.
2126 * XXX we still have the hash table, though...
2127 */
2128 lsf_deallocate(file_object, entry->base_address, sm_info,
2129 FALSE);
2130 }
2131
2132 LSF_DEBUG(("lsf_map: done\n"));
2133 return KERN_SUCCESS;
2134 }
2135
2136
2137 /* finds the file_object extent list in the shared memory hash table */
2138 /* If one is found, the associated extents in shared memory are deallocated */
2139 /* and the extent list is freed */
2140
2141 static void
2142 lsf_unload(
2143 void *file_object,
2144 vm_offset_t base_offset,
2145 shared_region_task_mappings_t sm_info)
2146 {
2147 lsf_deallocate(file_object, base_offset, sm_info, TRUE);
2148 }
2149
2150 /*
2151 * lsf_deallocate:
2152 *
2153 * Deallocates all the "shared region" internal data structures describing
2154 * the file and its mappings.
2155 * Also deallocates the actual file mappings if requested ("unload" arg).
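 *
 * The entry is first removed from the hash table; each loaded_mapping_t on
 * its list is then walked, its extent vm_deallocate()'d from the text or
 * data submap if "unload" is set, and the structure returned to lsf_zone.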
2156 */
2157 static void
2158 lsf_deallocate(
2159 void *file_object,
2160 vm_offset_t base_offset,
2161 shared_region_task_mappings_t sm_info,
2162 boolean_t unload)
2163 {
2164 load_struct_t *entry;
2165 loaded_mapping_t *map_ele;
2166 loaded_mapping_t *back_ptr;
2167
2168 LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
2169 file_object, base_offset, sm_info, unload));
2170 entry = lsf_hash_delete(file_object, base_offset, sm_info);
2171 if(entry) {
2172 map_ele = entry->mappings;
2173 while(map_ele != NULL) {
2174 if (unload) {
2175 ipc_port_t region_handle;
2176 vm_named_entry_t region_entry;
2177
2178 if(map_ele->protection & VM_PROT_COW) {
2179 region_handle = (ipc_port_t)
2180 sm_info->data_region;
2181 } else {
2182 region_handle = (ipc_port_t)
2183 sm_info->text_region;
2184 }
2185 region_entry = (vm_named_entry_t)
2186 region_handle->ip_kobject;
2187
2188 vm_deallocate(region_entry->backing.map,
2189 (entry->base_address +
2190 map_ele->mapping_offset),
2191 map_ele->size);
2192 }
2193 back_ptr = map_ele;
2194 map_ele = map_ele->next;
2195 LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
2196 "offset 0x%x size 0x%x\n",
2197 back_ptr, back_ptr->mapping_offset,
2198 back_ptr->size));
2199 zfree(lsf_zone, back_ptr);
2200 shared_file_available_hash_ele++;
2201 }
2202 LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
2203 LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p\n", entry));
2204 zfree(lsf_zone, entry);
2205 shared_file_available_hash_ele++;
2206 }
2207 LSF_DEBUG(("lsf_deallocate: done\n"));
2208 }
2209
2210 /* integer is from 1 to 100 and represents percent full */
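/* i.e. bytes in use from lsf_zone (count * elem_size) as a percentage of max_size */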
2211 unsigned int
2212 lsf_mapping_pool_gauge(void)
2213 {
2214 return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
2215 }