1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 *
24 * File: vm/vm_shared_memory_server.c
25 * Author: Chris Youngworth
26 *
27 * Support routines for an in-kernel shared memory allocator
28 */
29
30 #include <debug.h>
31
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34 #include <mach/vm_inherit.h>
35 #include <mach/vm_map.h>
36 #include <machine/cpu_capabilities.h>
37
38 #include <kern/kern_types.h>
39 #include <kern/ipc_kobject.h>
40 #include <kern/thread.h>
41 #include <kern/zalloc.h>
42 #include <kern/kalloc.h>
43
44 #include <ipc/ipc_types.h>
45 #include <ipc/ipc_port.h>
46
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_protos.h>
51
52 #include <mach/mach_vm.h>
53 #include <mach/shared_memory_server.h>
54 #include <vm/vm_shared_memory_server.h>
55
56 int shared_region_trace_level = SHARED_REGION_TRACE_ERROR;
57
58 #if DEBUG
59 int lsf_debug = 0;
60 int lsf_alloc_debug = 0;
61 #define LSF_DEBUG(args) \
62 MACRO_BEGIN \
63 if (lsf_debug) { \
64 kprintf args; \
65 } \
66 MACRO_END
67 #define LSF_ALLOC_DEBUG(args) \
68 MACRO_BEGIN \
69 if (lsf_alloc_debug) { \
70 kprintf args; \
71 } \
72 MACRO_END
73 #else /* DEBUG */
74 #define LSF_DEBUG(args)
75 #define LSF_ALLOC_DEBUG(args)
76 #endif /* DEBUG */
77
78 /* forward declarations */
79 static kern_return_t
80 shared_region_object_create(
81 vm_size_t size,
82 ipc_port_t *object_handle);
83
84 static kern_return_t
85 shared_region_mapping_dealloc_lock(
86 shared_region_mapping_t shared_region,
87 int need_sfh_lock,
88 int need_drl_lock);
89
90
91 static kern_return_t
92 shared_file_init(
93 ipc_port_t *text_region_handle,
94 vm_size_t text_region_size,
95 ipc_port_t *data_region_handle,
96 vm_size_t data_region_size,
97 vm_offset_t *file_mapping_array);
98
99 static kern_return_t
100 shared_file_header_init(
101 shared_file_info_t *shared_file_header);
102
103 static load_struct_t *
104 lsf_hash_lookup(
105 queue_head_t *hash_table,
106 void *file_object,
107 vm_offset_t recognizableOffset,
108 int size,
109 boolean_t regular,
110 boolean_t alternate,
111 shared_region_task_mappings_t sm_info);
112
113 static load_struct_t *
114 lsf_hash_delete(
115 load_struct_t *target_entry, /* optional */
116 void *file_object,
117 vm_offset_t base_offset,
118 shared_region_task_mappings_t sm_info);
119
120 static void
121 lsf_hash_insert(
122 load_struct_t *entry,
123 shared_region_task_mappings_t sm_info);
124
125 static kern_return_t
126 lsf_load(
127 vm_offset_t mapped_file,
128 vm_size_t mapped_file_size,
129 vm_offset_t *base_address,
130 sf_mapping_t *mappings,
131 int map_cnt,
132 void *file_object,
133 int flags,
134 shared_region_task_mappings_t sm_info);
135
136 static kern_return_t
137 lsf_slide(
138 unsigned int map_cnt,
139 struct shared_file_mapping_np *mappings,
140 shared_region_task_mappings_t sm_info,
141 mach_vm_offset_t *base_offset_p);
142
143 static kern_return_t
144 lsf_map(
145 struct shared_file_mapping_np *mappings,
146 int map_cnt,
147 void *file_control,
148 memory_object_size_t file_size,
149 shared_region_task_mappings_t sm_info,
150 mach_vm_offset_t base_offset,
151 mach_vm_offset_t *slide_p);
152
153 static void
154 lsf_unload(
155 void *file_object,
156 vm_offset_t base_offset,
157 shared_region_task_mappings_t sm_info);
158
159 static void
160 lsf_deallocate(
161 load_struct_t *target_entry, /* optional */
162 void *file_object,
163 vm_offset_t base_offset,
164 shared_region_task_mappings_t sm_info,
165 boolean_t unload);
166
167
168 #define load_file_hash(file_object, size) \
169 ((((natural_t)file_object) & 0xffffff) % size)
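/*
 * For illustration: load_file_hash() picks a hash bucket from the low 24 bits
 * of the file object's kernel address, reduced modulo the table size.  With a
 * hypothetical file_object of 0x01234567 and a 2048-bucket table (both values
 * are made up for the example):
 *
 *	bucket = (0x01234567 & 0xffffff) % 2048;	yields 0x567
 *
 * so every load_struct_t for a given file object chains off the same bucket.
 */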
170
171 /* Implementation */
172 vm_offset_t shared_file_mapping_array = 0;
173
174 shared_region_mapping_t default_environment_shared_regions = NULL;
175 static decl_mutex_data(,default_regions_list_lock_data)
176
177 #define default_regions_list_lock() \
178 mutex_lock(&default_regions_list_lock_data)
179 #define default_regions_list_lock_try() \
180 mutex_try(&default_regions_list_lock_data)
181 #define default_regions_list_unlock() \
182 mutex_unlock(&default_regions_list_lock_data)
183
184
185 ipc_port_t sfma_handle = NULL;
186 zone_t lsf_zone;
187
188 int shared_file_available_hash_ele;
189
190 /* com region support */
191 ipc_port_t com_region_handle32 = NULL;
192 ipc_port_t com_region_handle64 = NULL;
193 vm_map_t com_region_map32 = NULL;
194 vm_map_t com_region_map64 = NULL;
195 vm_size_t com_region_size32 = _COMM_PAGE32_AREA_LENGTH;
196 vm_size_t com_region_size64 = _COMM_PAGE64_AREA_LENGTH;
197 shared_region_mapping_t com_mapping_resource = NULL;
198
199
200 #if DEBUG
201 int shared_region_debug = 0;
202 #endif /* DEBUG */
203
204
205 kern_return_t
206 vm_get_shared_region(
207 task_t task,
208 shared_region_mapping_t *shared_region)
209 {
210 *shared_region = (shared_region_mapping_t) task->system_shared_region;
211 if (*shared_region) {
212 assert((*shared_region)->ref_count > 0);
213 }
214 SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
215 task, *shared_region));
216 return KERN_SUCCESS;
217 }
218
219 kern_return_t
220 vm_set_shared_region(
221 task_t task,
222 shared_region_mapping_t shared_region)
223 {
224 shared_region_mapping_t old_region;
225
226 SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
227 "shared_region=%p[%x,%x,%x])\n",
228 task, shared_region,
229 shared_region ? shared_region->fs_base : 0,
230 shared_region ? shared_region->system : 0,
231 shared_region ? shared_region->flags : 0));
232 if (shared_region) {
233 assert(shared_region->ref_count > 0);
234 }
235
236 old_region = task->system_shared_region;
237 SHARED_REGION_TRACE(
238 SHARED_REGION_TRACE_INFO,
239 ("shared_region: %p set_region(task=%p)"
240 "old=%p[%x,%x,%x], new=%p[%x,%x,%x]\n",
241 current_thread(), task,
242 old_region,
243 old_region ? old_region->fs_base : 0,
244 old_region ? old_region->system : 0,
245 old_region ? old_region->flags : 0,
246 shared_region,
247 shared_region ? shared_region->fs_base : 0,
248 shared_region ? shared_region->system : 0,
249 shared_region ? shared_region->flags : 0));
250
251 task->system_shared_region = shared_region;
252 return KERN_SUCCESS;
253 }
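/*
 * Note on reference counting: vm_set_shared_region() only stores the pointer;
 * it neither takes a new reference nor drops the old region's reference, so
 * callers are expected to manage both.  A rough sketch of the pattern used by
 * shared_file_boot_time_init() and the KERN_NO_SPACE recovery paths below
 * (variable names are illustrative):
 *
 *	shared_region_mapping_ref(new_region);		reference for the task
 *	vm_get_shared_region(task, &old_region);
 *	vm_set_shared_region(task, new_region);
 *	shared_region_mapping_dealloc(old_region);	drop the displaced ref
 */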
254
255 /*
256 * shared_region_object_chain_detach:
257 *
258 * Mark the shared region as being detached or standalone. This means
259 * that we won't keep track of which file is mapped and how, for this shared
260 * region. And we don't have a "shadow" shared region.
261 * This is used when we clone a private shared region and we intend to remove
262 * some mappings from it. It won't need to maintain mappings info because it's
263 * now private. It can't have a "shadow" shared region because we don't want
264 * to see the shadow of the mappings we're about to remove.
265 */
266 void
267 shared_region_object_chain_detached(
268 shared_region_mapping_t target_region)
269 {
270 shared_region_mapping_lock(target_region);
271 target_region->flags |= SHARED_REGION_STANDALONE;
272 shared_region_mapping_unlock(target_region);
273 }
274
275 /*
276 * shared_region_object_chain_attach:
277 *
278 * Link "target_region" to "object_chain_region". "object_chain_region"
279 * is treated as a shadow of "target_region" for the purpose of looking up
280 * mappings. Since the "target_region" preserves all the mappings of the
281 * older "object_chain_region", we won't duplicate all the mappings info and
282 * we'll just lookup the next region in the "object_chain" if we can't find
283 * what we're looking for in the "target_region". See lsf_hash_lookup().
284 */
285 kern_return_t
286 shared_region_object_chain_attach(
287 shared_region_mapping_t target_region,
288 shared_region_mapping_t object_chain_region)
289 {
290 shared_region_object_chain_t object_ele;
291
292 SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
293 "target_region=%p, object_chain_region=%p\n",
294 target_region, object_chain_region));
295 assert(target_region->ref_count > 0);
296 assert(object_chain_region->ref_count > 0);
297 if(target_region->object_chain)
298 return KERN_FAILURE;
299 object_ele = (shared_region_object_chain_t)
300 kalloc(sizeof (struct shared_region_object_chain));
301 shared_region_mapping_lock(object_chain_region);
302 target_region->object_chain = object_ele;
303 object_ele->object_chain_region = object_chain_region;
304 object_ele->next = object_chain_region->object_chain;
305 object_ele->depth = object_chain_region->depth;
306 object_chain_region->depth++;
307 target_region->alternate_next = object_chain_region->alternate_next;
308 shared_region_mapping_unlock(object_chain_region);
309 return KERN_SUCCESS;
310 }
311
312 /* LP64todo - need 64-bit safe version */
313 kern_return_t
314 shared_region_mapping_create(
315 ipc_port_t text_region,
316 vm_size_t text_size,
317 ipc_port_t data_region,
318 vm_size_t data_size,
319 vm_offset_t region_mappings,
320 vm_offset_t client_base,
321 shared_region_mapping_t *shared_region,
322 vm_offset_t alt_base,
323 vm_offset_t alt_next,
324 int fs_base,
325 int system)
326 {
327 SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
328 *shared_region = (shared_region_mapping_t)
329 kalloc(sizeof (struct shared_region_mapping));
330 if(*shared_region == NULL) {
331 SHARED_REGION_DEBUG(("shared_region_mapping_create: "
332 "failure\n"));
333 return KERN_FAILURE;
334 }
335 shared_region_mapping_lock_init((*shared_region));
336 (*shared_region)->text_region = text_region;
337 (*shared_region)->text_size = text_size;
338 (*shared_region)->fs_base = fs_base;
339 (*shared_region)->system = system;
340 (*shared_region)->data_region = data_region;
341 (*shared_region)->data_size = data_size;
342 (*shared_region)->region_mappings = region_mappings;
343 (*shared_region)->client_base = client_base;
344 (*shared_region)->ref_count = 1;
345 (*shared_region)->next = NULL;
346 (*shared_region)->object_chain = NULL;
347 (*shared_region)->self = *shared_region;
348 (*shared_region)->flags = 0;
349 (*shared_region)->depth = 0;
350 (*shared_region)->default_env_list = NULL;
351 (*shared_region)->alternate_base = alt_base;
352 (*shared_region)->alternate_next = alt_next;
353 SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
354 *shared_region));
355 return KERN_SUCCESS;
356 }
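/*
 * The new region comes back with ref_count == 1, owned by the caller;
 * additional users take references with shared_region_mapping_ref() and each
 * reference is eventually dropped through shared_region_mapping_dealloc() (or
 * its _lock variant).  A typical call, sketched from
 * shared_file_create_system_region() below (argument names are illustrative):
 *
 *	kret = shared_region_mapping_create(text_handle, text_size,
 *					    data_handle, data_size,
 *					    mapping_array,
 *					    GLOBAL_SHARED_TEXT_SEGMENT,
 *					    &region,
 *					    SHARED_ALTERNATE_LOAD_BASE,
 *					    SHARED_ALTERNATE_LOAD_BASE,
 *					    fs_base, system);
 */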
357
358 /* LP64todo - need 64-bit safe version */
359 kern_return_t
360 shared_region_mapping_info(
361 shared_region_mapping_t shared_region,
362 ipc_port_t *text_region,
363 vm_size_t *text_size,
364 ipc_port_t *data_region,
365 vm_size_t *data_size,
366 vm_offset_t *region_mappings,
367 vm_offset_t *client_base,
368 vm_offset_t *alt_base,
369 vm_offset_t *alt_next,
370 unsigned int *fs_base,
371 unsigned int *system,
372 int *flags,
373 shared_region_mapping_t *next)
374 {
375 shared_region_mapping_lock(shared_region);
376
377 SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
378 shared_region));
379 assert(shared_region->ref_count > 0);
380 *text_region = shared_region->text_region;
381 *text_size = shared_region->text_size;
382 *data_region = shared_region->data_region;
383 *data_size = shared_region->data_size;
384 *region_mappings = shared_region->region_mappings;
385 *client_base = shared_region->client_base;
386 *alt_base = shared_region->alternate_base;
387 *alt_next = shared_region->alternate_next;
388 *flags = shared_region->flags;
389 *fs_base = shared_region->fs_base;
390 *system = shared_region->system;
391 *next = shared_region->next;
392
393 shared_region_mapping_unlock(shared_region);
394
395 return KERN_SUCCESS;
396 }
397
398 /* LP64todo - need 64-bit safe version */
399 kern_return_t
400 shared_region_mapping_set_alt_next(
401 shared_region_mapping_t shared_region,
402 vm_offset_t alt_next)
403 {
404 SHARED_REGION_DEBUG(("shared_region_mapping_set_alt_next"
405 "(shared_region=%p, alt_next=0%x)\n",
406 shared_region, alt_next));
407 assert(shared_region->ref_count > 0);
408 shared_region->alternate_next = alt_next;
409 return KERN_SUCCESS;
410 }
411
412 kern_return_t
413 shared_region_mapping_ref(
414 shared_region_mapping_t shared_region)
415 {
416 SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
417 "ref_count=%d + 1\n",
418 shared_region,
419 shared_region ? shared_region->ref_count : 0));
420 if(shared_region == NULL)
421 return KERN_SUCCESS;
422 assert(shared_region->ref_count > 0);
423 hw_atomic_add(&shared_region->ref_count, 1);
424 return KERN_SUCCESS;
425 }
426
427 static kern_return_t
428 shared_region_mapping_dealloc_lock(
429 shared_region_mapping_t shared_region,
430 int need_sfh_lock,
431 int need_drl_lock)
432 {
433 struct shared_region_task_mappings sm_info;
434 shared_region_mapping_t next = NULL;
435 unsigned int ref_count;
436
437 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
438 "(shared_region=%p,%d,%d) ref_count=%d\n",
439 shared_region, need_sfh_lock, need_drl_lock,
440 shared_region ? shared_region->ref_count : 0));
441 while (shared_region) {
442 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
443 "ref_count=%d\n",
444 shared_region, shared_region->ref_count));
445 assert(shared_region->ref_count > 0);
446 if ((ref_count =
447 hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
448 shared_region_mapping_lock(shared_region);
449
450 sm_info.text_region = shared_region->text_region;
451 sm_info.text_size = shared_region->text_size;
452 sm_info.data_region = shared_region->data_region;
453 sm_info.data_size = shared_region->data_size;
454 sm_info.region_mappings = shared_region->region_mappings;
455 sm_info.client_base = shared_region->client_base;
456 sm_info.alternate_base = shared_region->alternate_base;
457 sm_info.alternate_next = shared_region->alternate_next;
458 sm_info.flags = shared_region->flags;
459 sm_info.self = (vm_offset_t)shared_region;
460
461 if(shared_region->region_mappings) {
462 lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
463 }
464 if(((vm_named_entry_t)
465 (shared_region->text_region->ip_kobject))
466 ->backing.map->pmap) {
467 pmap_remove(((vm_named_entry_t)
468 (shared_region->text_region->ip_kobject))
469 ->backing.map->pmap,
470 sm_info.client_base,
471 sm_info.client_base + sm_info.text_size);
472 }
473 ipc_port_release_send(shared_region->text_region);
474 if(shared_region->data_region)
475 ipc_port_release_send(shared_region->data_region);
476 if (shared_region->object_chain) {
477 next = shared_region->object_chain->object_chain_region;
478 kfree(shared_region->object_chain,
479 sizeof (struct shared_region_object_chain));
480 } else {
481 next = NULL;
482 }
483 shared_region_mapping_unlock(shared_region);
484 SHARED_REGION_DEBUG(
485 ("shared_region_mapping_dealloc_lock(%p): "
486 "freeing\n",
487 shared_region));
488 bzero((void *)shared_region,
489 sizeof (*shared_region)); /* FBDP debug */
490 kfree(shared_region,
491 sizeof (struct shared_region_mapping));
492 shared_region = next;
493 } else {
494 /* Stale indicates that a system region is no */
495 /* longer in the default environment list. */
496 if((ref_count == 1) &&
497 (shared_region->flags & SHARED_REGION_SYSTEM)
498 && !(shared_region->flags & SHARED_REGION_STALE)) {
499 SHARED_REGION_DEBUG(
500 ("shared_region_mapping_dealloc_lock"
501 "(%p): removing stale\n",
502 shared_region));
503 remove_default_shared_region_lock(shared_region,need_sfh_lock, need_drl_lock);
504 }
505 break;
506 }
507 }
508 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
509 shared_region));
510 return KERN_SUCCESS;
511 }
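/*
 * Summary of the tear-down above: the reference count is dropped atomically;
 * when it reaches zero, the region's file mappings are removed, the client's
 * text range is flushed from the backing pmap, the text/data ports are
 * released and the structure is freed, after which the loop continues with
 * the next region in the object chain.  When the count only drops to 1 on a
 * system region that is not yet stale, the region is instead pulled off the
 * default environment list via remove_default_shared_region_lock().
 */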
512
513 /*
514 * Stub function; always indicates that the lock needs to be taken in the
515 * call to lsf_remove_regions_mappings_lock().
516 */
517 kern_return_t
518 shared_region_mapping_dealloc(
519 shared_region_mapping_t shared_region)
520 {
521 SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
522 "(shared_region=%p)\n",
523 shared_region));
524 if (shared_region) {
525 assert(shared_region->ref_count > 0);
526 }
527 return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
528 }
529
530 static
531 kern_return_t
532 shared_region_object_create(
533 vm_size_t size,
534 ipc_port_t *object_handle)
535 {
536 vm_named_entry_t user_entry;
537 ipc_port_t user_handle;
538
539 ipc_port_t previous;
540 vm_map_t new_map;
541
542 user_entry = (vm_named_entry_t)
543 kalloc(sizeof (struct vm_named_entry));
544 if(user_entry == NULL) {
545 return KERN_FAILURE;
546 }
547 named_entry_lock_init(user_entry);
548 user_handle = ipc_port_alloc_kernel();
549
550
551 ip_lock(user_handle);
552
553 /* make a sonce right */
554 user_handle->ip_sorights++;
555 ip_reference(user_handle);
556
557 user_handle->ip_destination = IP_NULL;
558 user_handle->ip_receiver_name = MACH_PORT_NULL;
559 user_handle->ip_receiver = ipc_space_kernel;
560
561 /* make a send right */
562 user_handle->ip_mscount++;
563 user_handle->ip_srights++;
564 ip_reference(user_handle);
565
566 ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
567 /* nsrequest unlocks user_handle */
568
569 /* Create a named object based on a submap of specified size */
570
571 new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
572 user_entry->backing.map = new_map;
573 user_entry->internal = TRUE;
574 user_entry->is_sub_map = TRUE;
575 user_entry->is_pager = FALSE;
576 user_entry->offset = 0;
577 user_entry->protection = VM_PROT_ALL;
578 user_entry->size = size;
579 user_entry->ref_count = 1;
580
581 ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
582 IKOT_NAMED_ENTRY);
583 *object_handle = user_handle;
584 return KERN_SUCCESS;
585 }
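/*
 * shared_region_object_create() hands back a kernel named-entry port whose
 * backing is a freshly created submap (with its own pmap) of the requested
 * size.  A minimal usage sketch, following the pattern used by
 * shared_com_boot_time_init() below (variable names are illustrative):
 *
 *	ipc_port_t	handle;
 *	vm_map_t	map;
 *
 *	if (shared_region_object_create(0x10000000, &handle) != KERN_SUCCESS)
 *		panic("no region");
 *	map = ((vm_named_entry_t) handle->ip_kobject)->backing.map;
 */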
586
587 /* called for the non-default, private branch shared region support */
588 /* the system default fields fs_base and system are not relevant */
589 /* here, as the system default flag is not set */
590 kern_return_t
591 shared_file_create_system_region(
592 shared_region_mapping_t *shared_region,
593 int fs_base,
594 int system)
595 {
596 ipc_port_t text_handle;
597 ipc_port_t data_handle;
598 long text_size;
599 long data_size;
600 vm_offset_t mapping_array;
601 kern_return_t kret;
602
603 SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));
604
605 text_size = 0x10000000;
606 data_size = 0x10000000;
607
608 kret = shared_file_init(&text_handle,
609 text_size, &data_handle, data_size, &mapping_array);
610 if(kret) {
611 SHARED_REGION_DEBUG(("shared_file_create_system_region: "
612 "shared_file_init failed kret=0x%x\n",
613 kret));
614 return kret;
615 }
616 kret = shared_region_mapping_create(text_handle, text_size,
617 data_handle, data_size,
618 mapping_array,
619 GLOBAL_SHARED_TEXT_SEGMENT,
620 shared_region,
621 SHARED_ALTERNATE_LOAD_BASE,
622 SHARED_ALTERNATE_LOAD_BASE,
623 fs_base,
624 system);
625 if(kret) {
626 SHARED_REGION_DEBUG(("shared_file_create_system_region: "
627 "shared_region_mapping_create failed "
628 "kret=0x%x\n",
629 kret));
630 return kret;
631 }
632 (*shared_region)->flags = 0;
633 if(com_mapping_resource) {
634 shared_region_mapping_ref(com_mapping_resource);
635 (*shared_region)->next = com_mapping_resource;
636 }
637
638 SHARED_REGION_DEBUG(("shared_file_create_system_region() "
639 "-> shared_region=%p\n",
640 *shared_region));
641 return KERN_SUCCESS;
642 }
643
644 /*
645 * load a new default for a specified environment into the default shared
646 * regions list. If a previous default exists for the environment specification,
647 * it is returned along with its reference. It is expected that the new
648 * system region structure passes a reference.
649 */
650
651 shared_region_mapping_t
652 update_default_shared_region(
653 shared_region_mapping_t new_system_region)
654 {
655 shared_region_mapping_t old_system_region;
656 unsigned int fs_base;
657 unsigned int system;
658
659 SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
660 new_system_region));
661 assert(new_system_region->ref_count > 0);
662 fs_base = new_system_region->fs_base;
663 system = new_system_region->system;
664 new_system_region->flags |= SHARED_REGION_SYSTEM;
665 default_regions_list_lock();
666 old_system_region = default_environment_shared_regions;
667
668 if((old_system_region != NULL) &&
669 (old_system_region->fs_base == fs_base) &&
670 (old_system_region->system == system)) {
671 new_system_region->default_env_list =
672 old_system_region->default_env_list;
673 old_system_region->default_env_list = NULL;
674 default_environment_shared_regions = new_system_region;
675 old_system_region->flags |= SHARED_REGION_STALE;
676 default_regions_list_unlock();
677 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
678 "old=%p stale 1\n",
679 new_system_region, old_system_region));
680 assert(old_system_region->ref_count > 0);
681 return old_system_region;
682 }
683 if (old_system_region) {
684 while(old_system_region->default_env_list != NULL) {
685 if((old_system_region->default_env_list->fs_base == fs_base) &&
686 (old_system_region->default_env_list->system == system)) {
687 shared_region_mapping_t tmp_system_region;
688
689 tmp_system_region =
690 old_system_region->default_env_list;
691 new_system_region->default_env_list =
692 tmp_system_region->default_env_list;
693 tmp_system_region->default_env_list = NULL;
694 old_system_region->default_env_list =
695 new_system_region;
696 old_system_region = tmp_system_region;
697 old_system_region->flags |= SHARED_REGION_STALE;
698 default_regions_list_unlock();
699 SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
700 ": old=%p stale 2\n",
701 new_system_region,
702 old_system_region));
703 assert(old_system_region->ref_count > 0);
704 return old_system_region;
705 }
706 old_system_region = old_system_region->default_env_list;
707 }
708 }
709 /* If we get here, we are at the end of the system list and we */
710 /* did not find a pre-existing entry */
711 if(old_system_region) {
712 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
713 "adding after old=%p\n",
714 new_system_region, old_system_region));
715 assert(old_system_region->ref_count > 0);
716 old_system_region->default_env_list = new_system_region;
717 } else {
718 SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
719 "new default\n",
720 new_system_region));
721 default_environment_shared_regions = new_system_region;
722 }
723 assert(new_system_region->ref_count > 0);
724 default_regions_list_unlock();
725 return NULL;
726 }
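/*
 * Return value: if a default already existed for the same fs_base/system
 * pair, that displaced region is returned, marked SHARED_REGION_STALE and
 * still carrying its reference, which the caller is expected to drop;
 * otherwise NULL is returned.  Sketch of the usage in
 * shared_file_boot_time_init():
 *
 *	old = update_default_shared_region(new_region);
 *	if (old != NULL)
 *		shared_region_mapping_dealloc(old);
 */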
727
728 /*
729 * lookup a system_shared_region for the environment specified. If one is
730 * found, it is returned along with a reference against the structure
731 */
732
733 shared_region_mapping_t
734 lookup_default_shared_region(
735 unsigned int fs_base,
736 unsigned int system)
737 {
738 shared_region_mapping_t system_region;
739 default_regions_list_lock();
740 system_region = default_environment_shared_regions;
741
742 SHARED_REGION_DEBUG(("lookup_default_shared_region"
743 "(base=0x%x, system=0x%x)\n",
744 fs_base, system));
745 while(system_region != NULL) {
746 SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
747 ": system_region=%p base=0x%x system=0x%x"
748 " ref_count=%d\n",
749 fs_base, system, system_region,
750 system_region->fs_base,
751 system_region->system,
752 system_region->ref_count));
753 assert(system_region->ref_count > 0);
754 if((system_region->fs_base == fs_base) &&
755 (system_region->system == system)) {
756 break;
757 }
758 system_region = system_region->default_env_list;
759 }
760 if(system_region)
761 shared_region_mapping_ref(system_region);
762 default_regions_list_unlock();
763 SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
764 fs_base, system, system_region));
765 return system_region;
766 }
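/*
 * The region returned above carries an extra reference taken with
 * shared_region_mapping_ref(); callers drop that reference when done with
 * shared_region_mapping_dealloc() or, as the KERN_NO_SPACE paths in
 * copyin_shared_file() and map_shared_file() do, with
 * shared_region_mapping_dealloc_lock().
 */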
767
768 /*
769 * remove a system_region default if it appears in the default regions list.
770 * Drop a reference on removal.
771 */
772
773 __private_extern__ void
774 remove_default_shared_region_lock(
775 shared_region_mapping_t system_region,
776 int need_sfh_lock,
777 int need_drl_lock)
778 {
779 shared_region_mapping_t old_system_region;
780
781 SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
782 "(system_region=%p, %d, %d)\n",
783 system_region, need_sfh_lock, need_drl_lock));
784 if (need_drl_lock) {
785 default_regions_list_lock();
786 }
787 old_system_region = default_environment_shared_regions;
788
789 if(old_system_region == NULL) {
790 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
791 "-> default_env=NULL\n",
792 system_region));
793 if (need_drl_lock) {
794 default_regions_list_unlock();
795 }
796 return;
797 }
798
799 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
800 "default_env=%p\n",
801 system_region, old_system_region));
802 assert(old_system_region->ref_count > 0);
803 if (old_system_region == system_region) {
804 default_environment_shared_regions
805 = old_system_region->default_env_list;
806 old_system_region->default_env_list = NULL;
807 old_system_region->flags |= SHARED_REGION_STALE;
808 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
809 "old=%p ref_count=%d STALE\n",
810 system_region, old_system_region,
811 old_system_region->ref_count));
812 shared_region_mapping_dealloc_lock(old_system_region,
813 need_sfh_lock,
814 0);
815 if (need_drl_lock) {
816 default_regions_list_unlock();
817 }
818 return;
819 }
820
821 while(old_system_region->default_env_list != NULL) {
822 SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
823 "old=%p->default_env=%p\n",
824 system_region, old_system_region,
825 old_system_region->default_env_list));
826 assert(old_system_region->default_env_list->ref_count > 0);
827 if(old_system_region->default_env_list == system_region) {
828 shared_region_mapping_t dead_region;
829 dead_region = old_system_region->default_env_list;
830 old_system_region->default_env_list =
831 dead_region->default_env_list;
832 dead_region->default_env_list = NULL;
833 dead_region->flags |= SHARED_REGION_STALE;
834 SHARED_REGION_DEBUG(
835 ("remove_default_shared_region_lock(%p): "
836 "dead=%p ref_count=%d stale\n",
837 system_region, dead_region,
838 dead_region->ref_count));
839 shared_region_mapping_dealloc_lock(dead_region,
840 need_sfh_lock,
841 0);
842 if (need_drl_lock) {
843 default_regions_list_unlock();
844 }
845 return;
846 }
847 old_system_region = old_system_region->default_env_list;
848 }
849 if (need_drl_lock) {
850 default_regions_list_unlock();
851 }
852 }
853
854 /*
855 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
856 * the only caller. Remove this stub function and the corresponding symbol
857 * export for Merlot.
858 */
859 void
860 remove_default_shared_region(
861 shared_region_mapping_t system_region)
862 {
863 SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
864 system_region));
865 if (system_region) {
866 assert(system_region->ref_count > 0);
867 }
868 remove_default_shared_region_lock(system_region, 1, 1);
869 }
870
871 void
872 remove_all_shared_regions(void)
873 {
874 shared_region_mapping_t system_region;
875 shared_region_mapping_t next_system_region;
876
877 SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
878 LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
879 LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
880 default_regions_list_lock();
881 system_region = default_environment_shared_regions;
882
883 if(system_region == NULL) {
884 default_regions_list_unlock();
885 return;
886 }
887
888 while(system_region != NULL) {
889 next_system_region = system_region->default_env_list;
890 system_region->default_env_list = NULL;
891 system_region->flags |= SHARED_REGION_STALE;
892 SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
893 "%p ref_count=%d stale\n",
894 system_region, system_region->ref_count));
895 assert(system_region->ref_count > 0);
896 shared_region_mapping_dealloc_lock(system_region, 1, 0);
897 system_region = next_system_region;
898 }
899 default_environment_shared_regions = NULL;
900 default_regions_list_unlock();
901 SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
902 LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
903 LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
904 }
905
906 /* shared_com_boot_time_init initializes the common page shared data and */
907 /* text region. This region is semi-independent of the split libs */
908 /* and so its policies have to be handled differently by the code that */
909 /* manipulates the mapping of shared region environments. However, */
910 /* the shared region delivery system supports both. */
911 void shared_com_boot_time_init(void); /* forward */
912 void
913 shared_com_boot_time_init(void)
914 {
915 kern_return_t kret;
916 vm_named_entry_t named_entry;
917
918 SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
919 if(com_region_handle32) {
920 panic("shared_com_boot_time_init: "
921 "com_region_handle32 already set\n");
922 }
923 if(com_region_handle64) {
924 panic("shared_com_boot_time_init: "
925 "com_region_handle64 already set\n");
926 }
927
928 /* create com page regions, 1 each for 32 and 64-bit code */
929 if((kret = shared_region_object_create(
930 com_region_size32,
931 &com_region_handle32))) {
932 panic("shared_com_boot_time_init: "
933 "unable to create 32-bit comm page\n");
934 return;
935 }
936 if((kret = shared_region_object_create(
937 com_region_size64,
938 &com_region_handle64))) {
939 panic("shared_com_boot_time_init: "
940 "unable to create 64-bit comm page\n");
941 return;
942 }
943
944 /* now export the underlying region/map */
945 named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
946 com_region_map32 = named_entry->backing.map;
947 named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
948 com_region_map64 = named_entry->backing.map;
949
950 /* wrap the com region in its own shared file mapping structure */
951 /* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
952 kret = shared_region_mapping_create(com_region_handle32,
953 com_region_size32,
954 NULL, 0, 0,
955 _COMM_PAGE_BASE_ADDRESS,
956 &com_mapping_resource,
957 0, 0,
958 ENV_DEFAULT_ROOT, cpu_type());
959 if (kret) {
960 panic("shared_region_mapping_create failed for commpage");
961 }
962 }
963
964 void
965 shared_file_boot_time_init(
966 unsigned int fs_base,
967 unsigned int system)
968 {
969 mach_port_t text_region_handle;
970 mach_port_t data_region_handle;
971 long text_region_size;
972 long data_region_size;
973 shared_region_mapping_t new_system_region;
974 shared_region_mapping_t old_default_env;
975
976 SHARED_REGION_DEBUG(("shared_file_boot_time_init"
977 "(base=0x%x,system=0x%x)\n",
978 fs_base, system));
979 text_region_size = 0x10000000;
980 data_region_size = 0x10000000;
981 shared_file_init(&text_region_handle,
982 text_region_size,
983 &data_region_handle,
984 data_region_size,
985 &shared_file_mapping_array);
986
987 shared_region_mapping_create(text_region_handle,
988 text_region_size,
989 data_region_handle,
990 data_region_size,
991 shared_file_mapping_array,
992 GLOBAL_SHARED_TEXT_SEGMENT,
993 &new_system_region,
994 SHARED_ALTERNATE_LOAD_BASE,
995 SHARED_ALTERNATE_LOAD_BASE,
996 fs_base, system);
997
998 new_system_region->flags = SHARED_REGION_SYSTEM;
999
1000 /* grab an extra reference for the caller */
1001 /* remember to grab before call to update */
1002 shared_region_mapping_ref(new_system_region);
1003 old_default_env = update_default_shared_region(new_system_region);
1004 /* hold an extra reference because these are the system */
1005 /* shared regions. */
1006 if(old_default_env)
1007 shared_region_mapping_dealloc(old_default_env);
1008 if(com_mapping_resource == NULL) {
1009 shared_com_boot_time_init();
1010 }
1011 shared_region_mapping_ref(com_mapping_resource);
1012 new_system_region->next = com_mapping_resource;
1013 vm_set_shared_region(current_task(), new_system_region);
1014 SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
1015 fs_base, system));
1016 }
1017
1018
1019 /* called at boot time, allocates two regions, each 256 megs in size */
1020 /* these regions are later mapped into task spaces, allowing them to */
1021 /* share the contents of the regions. shared_file_init is part of */
1022 /* a shared_memory_server which not only allocates the backing maps */
1023 /* but also coordinates requests for space. */
1024
1025
1026 static kern_return_t
1027 shared_file_init(
1028 ipc_port_t *text_region_handle,
1029 vm_size_t text_region_size,
1030 ipc_port_t *data_region_handle,
1031 vm_size_t data_region_size,
1032 vm_offset_t *file_mapping_array)
1033 {
1034 shared_file_info_t *sf_head;
1035 vm_size_t data_table_size;
1036 int hash_size;
1037 kern_return_t kret;
1038
1039 vm_object_t buf_object;
1040 vm_map_entry_t entry;
1041 vm_size_t alloced;
1042 vm_offset_t b;
1043 vm_page_t p;
1044
1045 SHARED_REGION_DEBUG(("shared_file_init()\n"));
1046 /* create text and data maps/regions */
1047 kret = shared_region_object_create(
1048 text_region_size,
1049 text_region_handle);
1050 if (kret) {
1051 return kret;
1052 }
1053 kret = shared_region_object_create(
1054 data_region_size,
1055 data_region_handle);
1056 if (kret) {
1057 ipc_port_release_send(*text_region_handle);
1058 return kret;
1059 }
1060
1061 data_table_size = data_region_size >> 9;
1062 hash_size = data_region_size >> 14;
1063
1064 if(shared_file_mapping_array == 0) {
1065 vm_map_address_t map_addr;
1066 buf_object = vm_object_allocate(data_table_size);
1067
1068 if(vm_map_find_space(kernel_map, &map_addr,
1069 data_table_size, 0, 0, &entry)
1070 != KERN_SUCCESS) {
1071 panic("shared_file_init: no space");
1072 }
1073 shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
1074 *file_mapping_array = shared_file_mapping_array;
1075 vm_map_unlock(kernel_map);
1076 entry->object.vm_object = buf_object;
1077 entry->offset = 0;
1078
1079 for (b = *file_mapping_array, alloced = 0;
1080 alloced < (hash_size +
1081 round_page(sizeof(struct sf_mapping)));
1082 alloced += PAGE_SIZE, b += PAGE_SIZE) {
1083 vm_object_lock(buf_object);
1084 p = vm_page_alloc(buf_object, alloced);
1085 if (p == VM_PAGE_NULL) {
1086 panic("shared_file_init: no space");
1087 }
1088 p->busy = FALSE;
1089 vm_object_unlock(buf_object);
1090 pmap_enter(kernel_pmap, b, p->phys_page,
1091 VM_PROT_READ | VM_PROT_WRITE,
1092 ((unsigned int)(p->object->wimg_bits))
1093 & VM_WIMG_MASK,
1094 TRUE);
1095 }
1096
1097
1098 /* initialize loaded file array */
1099 sf_head = (shared_file_info_t *)*file_mapping_array;
1100 sf_head->hash = (queue_head_t *)
1101 (((int)*file_mapping_array) +
1102 sizeof(struct shared_file_info));
1103 sf_head->hash_size = hash_size/sizeof(queue_head_t);
1104 mutex_init(&(sf_head->lock), 0);
1105 sf_head->hash_init = FALSE;
1106
1107
1108 mach_make_memory_entry(kernel_map, &data_table_size,
1109 *file_mapping_array, VM_PROT_READ, &sfma_handle,
1110 NULL);
1111
1112 if (vm_map_wire(kernel_map,
1113 vm_map_trunc_page(*file_mapping_array),
1114 vm_map_round_page(*file_mapping_array +
1115 hash_size +
1116 round_page(sizeof(struct sf_mapping))),
1117 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1118 panic("shared_file_init: No memory for data table");
1119 }
1120
1121 lsf_zone = zinit(sizeof(struct load_file_ele),
1122 data_table_size -
1123 (hash_size + round_page_32(sizeof(struct sf_mapping))),
1124 0, "load_file_server");
1125
1126 zone_change(lsf_zone, Z_EXHAUST, TRUE);
1127 zone_change(lsf_zone, Z_COLLECT, FALSE);
1128 zone_change(lsf_zone, Z_EXPAND, FALSE);
1129 zone_change(lsf_zone, Z_FOREIGN, TRUE);
1130
1131 /* initialize the global default environment lock */
1132 mutex_init(&default_regions_list_lock_data, 0);
1133
1134 } else {
1135 *file_mapping_array = shared_file_mapping_array;
1136 }
1137
1138 SHARED_REGION_DEBUG(("shared_file_init() done\n"));
1139 return KERN_SUCCESS;
1140 }
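/*
 * Sizing, for the 256MB (0x10000000) regions used at boot: data_table_size is
 * data_region_size >> 9 = 0x80000 bytes and hash_size is
 * data_region_size >> 14 = 0x4000 bytes.  The mapping array is laid out as a
 * shared_file_info_t header followed by the hash buckets and the sf_mapping
 * area, with whatever remains of data_table_size serving as backing store for
 * lsf_zone (the "load_file_server" zone), which is populated page by page
 * from shared_file_header_init() below.
 */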
1141
1142 static kern_return_t
1143 shared_file_header_init(
1144 shared_file_info_t *shared_file_header)
1145 {
1146 vm_size_t hash_table_size;
1147 vm_size_t hash_table_offset;
1148 int i;
1149 /* wire hash entry pool only as needed; since we are the only */
1150 /* users, we take a few liberties with the population of our */
1151 /* zone. */
1152 static int allocable_hash_pages;
1153 static vm_offset_t hash_cram_address;
1154
1155
1156 hash_table_size = shared_file_header->hash_size
1157 * sizeof (struct queue_entry);
1158 hash_table_offset = hash_table_size +
1159 round_page(sizeof (struct sf_mapping));
1160 for (i = 0; i < shared_file_header->hash_size; i++)
1161 queue_init(&shared_file_header->hash[i]);
1162
1163 allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
1164 / PAGE_SIZE);
1165 hash_cram_address = ((vm_offset_t) shared_file_header)
1166 + hash_table_offset;
1167 shared_file_available_hash_ele = 0;
1168
1169 shared_file_header->hash_init = TRUE;
1170
1171 if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
1172 int cram_pages, cram_size;
1173
1174 cram_pages = allocable_hash_pages > 3 ?
1175 3 : allocable_hash_pages;
1176 cram_size = cram_pages * PAGE_SIZE;
1177 if (vm_map_wire(kernel_map, hash_cram_address,
1178 hash_cram_address + cram_size,
1179 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
1180 SHARED_REGION_TRACE(
1181 SHARED_REGION_TRACE_ERROR,
1182 ("shared_region: shared_file_header_init: "
1183 "No memory for data table\n"));
1184 return KERN_NO_SPACE;
1185 }
1186 allocable_hash_pages -= cram_pages;
1187 zcram(lsf_zone, (void *) hash_cram_address, cram_size);
1188 shared_file_available_hash_ele
1189 += cram_size/sizeof(struct load_file_ele);
1190 hash_cram_address += cram_size;
1191 }
1192
1193 return KERN_SUCCESS;
1194 }
1195
1196
1197 /* Called from user space, copyin_shared_file requires the user to */
1198 /* provide the address and size of a mapped file, the full path name of */
1199 /* that file and a list of offsets to be mapped into shared memory. */
1200 /* By requiring that the file be pre-mapped, copyin_shared_file can */
1201 /* guarantee that the file is neither deleted nor changed after the user */
1202 /* begins the call. */
1203
1204 kern_return_t
1205 copyin_shared_file(
1206 vm_offset_t mapped_file,
1207 vm_size_t mapped_file_size,
1208 vm_offset_t *base_address,
1209 int map_cnt,
1210 sf_mapping_t *mappings,
1211 memory_object_control_t file_control,
1212 shared_region_task_mappings_t sm_info,
1213 int *flags)
1214 {
1215 vm_object_t file_object;
1216 vm_map_entry_t entry;
1217 shared_file_info_t *shared_file_header;
1218 load_struct_t *file_entry;
1219 loaded_mapping_t *file_mapping;
1220 boolean_t alternate;
1221 int i;
1222 kern_return_t ret;
1223
1224 SHARED_REGION_DEBUG(("copyin_shared_file()\n"));
1225
1226 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1227
1228 mutex_lock(&shared_file_header->lock);
1229
1230 /* If this is the first call to this routine, take the opportunity */
1231 /* to initialize the hash table which will be used to look-up */
1232 /* mappings based on the file object */
1233
1234 if(shared_file_header->hash_init == FALSE) {
1235 ret = shared_file_header_init(shared_file_header);
1236 if (ret != KERN_SUCCESS) {
1237 mutex_unlock(&shared_file_header->lock);
1238 return ret;
1239 }
1240 }
1241
1242 /* Find the entry in the map associated with the current mapping */
1243 /* of the file object */
1244 file_object = memory_object_control_to_vm_object(file_control);
1245 if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
1246 vm_object_t mapped_object;
1247 if(entry->is_sub_map ||
1248 entry->object.vm_object == VM_OBJECT_NULL) {
1249 mutex_unlock(&shared_file_header->lock);
1250 return KERN_INVALID_ADDRESS;
1251 }
1252 mapped_object = entry->object.vm_object;
1253 while(mapped_object->shadow != NULL) {
1254 mapped_object = mapped_object->shadow;
1255 }
1256 /* check to see that the file object passed is indeed the */
1257 /* same as the mapped object passed */
1258 if(file_object != mapped_object) {
1259 if(sm_info->flags & SHARED_REGION_SYSTEM) {
1260 mutex_unlock(&shared_file_header->lock);
1261 return KERN_PROTECTION_FAILURE;
1262 } else {
1263 file_object = mapped_object;
1264 }
1265 }
1266 } else {
1267 mutex_unlock(&shared_file_header->lock);
1268 return KERN_INVALID_ADDRESS;
1269 }
1270
1271 alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;
1272
1273 file_entry = lsf_hash_lookup(shared_file_header->hash,
1274 (void *) file_object,
1275 mappings[0].file_offset,
1276 shared_file_header->hash_size,
1277 !alternate, alternate, sm_info);
1278 if (file_entry) {
1279 /* File is loaded, check the load manifest for exact match */
1280 /* we simplify by requiring that the elements be the same */
1281 /* size and in the same order rather than checking for */
1282 /* semantic equivalence. */
1283
1284 /* If the file is being loaded in the alternate */
1285 /* area, one load to alternate is allowed per mapped */
1286 /* object; the base address is passed back to the */
1287 /* caller and the mappings field is filled in. If the */
1288 /* caller does not pass the precise mappings_cnt */
1289 /* and the Alternate is already loaded, an error */
1290 /* is returned. */
1291 i = 0;
1292 file_mapping = file_entry->mappings;
1293 while(file_mapping != NULL) {
1294 if(i>=map_cnt) {
1295 mutex_unlock(&shared_file_header->lock);
1296 return KERN_INVALID_ARGUMENT;
1297 }
1298 if(((mappings[i].mapping_offset)
1299 & SHARED_DATA_REGION_MASK) !=
1300 file_mapping->mapping_offset ||
1301 mappings[i].size !=
1302 file_mapping->size ||
1303 mappings[i].file_offset !=
1304 file_mapping->file_offset ||
1305 mappings[i].protection !=
1306 file_mapping->protection) {
1307 break;
1308 }
1309 file_mapping = file_mapping->next;
1310 i++;
1311 }
1312 if(i!=map_cnt) {
1313 mutex_unlock(&shared_file_header->lock);
1314 return KERN_INVALID_ARGUMENT;
1315 }
1316 *base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
1317 + file_entry->base_address;
1318 *flags = SF_PREV_LOADED;
1319 mutex_unlock(&shared_file_header->lock);
1320 return KERN_SUCCESS;
1321 } else {
1322 /* File is not loaded, let's attempt to load it */
1323 ret = lsf_load(mapped_file, mapped_file_size, base_address,
1324 mappings, map_cnt,
1325 (void *)file_object,
1326 *flags, sm_info);
1327 *flags = 0;
1328 if(ret == KERN_NO_SPACE) {
1329 shared_region_mapping_t regions;
1330 shared_region_mapping_t system_region;
1331 regions = (shared_region_mapping_t)sm_info->self;
1332 regions->flags |= SHARED_REGION_FULL;
1333 system_region = lookup_default_shared_region(
1334 regions->fs_base, regions->system);
1335 if(system_region == regions) {
1336 shared_region_mapping_t new_system_shared_region;
1337 shared_file_boot_time_init(
1338 regions->fs_base, regions->system);
1339 /* current task must stay with its current */
1340 /* regions, drop count on system_shared_region */
1341 /* and put back our original set */
1342 vm_get_shared_region(current_task(),
1343 &new_system_shared_region);
1344 shared_region_mapping_dealloc_lock(
1345 new_system_shared_region, 0, 1);
1346 vm_set_shared_region(current_task(), regions);
1347 } else if(system_region != NULL) {
1348 shared_region_mapping_dealloc_lock(
1349 system_region, 0, 1);
1350 }
1351 }
1352 mutex_unlock(&shared_file_header->lock);
1353 return ret;
1354 }
1355 }
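/*
 * Note on the KERN_NO_SPACE path above: when lsf_load() reports that the
 * region is full, the region is flagged SHARED_REGION_FULL and, if it is
 * still the default for its fs_base/system environment, a fresh default is
 * built with shared_file_boot_time_init(); the current task then puts back
 * its original (full) regions and drops the reference on the freshly
 * installed set, so only the default environment list is switched over.
 */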
1356
1357 extern void shared_region_dump_file_entry(
1358 int trace_level,
1359 load_struct_t *entry); /* forward */
1360
1361 void shared_region_dump_file_entry(
1362 int trace_level,
1363 load_struct_t *entry)
1364 {
1365 int i;
1366 loaded_mapping_t *mapping;
1367
1368 if (trace_level > shared_region_trace_level) {
1369 return;
1370 }
1371 printf("shared region: %p: "
1372 "file_entry %p base_address=0x%x file_offset=0x%x "
1373 "%d mappings\n",
1374 current_thread(), entry,
1375 entry->base_address, entry->file_offset, entry->mapping_cnt);
1376 mapping = entry->mappings;
1377 for (i = 0; i < entry->mapping_cnt; i++) {
1378 printf("shared region: %p:\t#%d: "
1379 "offset=0x%x size=0x%x file_offset=0x%x prot=%d\n",
1380 current_thread(),
1381 i,
1382 mapping->mapping_offset,
1383 mapping->size,
1384 mapping->file_offset,
1385 mapping->protection);
1386 mapping = mapping->next;
1387 }
1388 }
1389
1390 extern void shared_region_dump_mappings(
1391 int trace_level,
1392 struct shared_file_mapping_np *mappings,
1393 int map_cnt,
1394 mach_vm_offset_t base_offset); /* forward */
1395
1396 void shared_region_dump_mappings(
1397 int trace_level,
1398 struct shared_file_mapping_np *mappings,
1399 int map_cnt,
1400 mach_vm_offset_t base_offset)
1401 {
1402 int i;
1403
1404 if (trace_level > shared_region_trace_level) {
1405 return;
1406 }
1407
1408 printf("shared region: %p: %d mappings base_offset=0x%llx\n",
1409 current_thread(), map_cnt, (uint64_t) base_offset);
1410 for (i = 0; i < map_cnt; i++) {
1411 printf("shared region: %p:\t#%d: "
1412 "addr=0x%llx, size=0x%llx, file_offset=0x%llx, "
1413 "prot=(%d,%d)\n",
1414 current_thread(),
1415 i,
1416 (uint64_t) mappings[i].sfm_address,
1417 (uint64_t) mappings[i].sfm_size,
1418 (uint64_t) mappings[i].sfm_file_offset,
1419 mappings[i].sfm_max_prot,
1420 mappings[i].sfm_init_prot);
1421 }
1422 }
1423
1424 extern void shared_region_dump_conflict_info(
1425 int trace_level,
1426 vm_map_t map,
1427 vm_map_offset_t offset,
1428 vm_map_size_t size); /* forward */
1429
1430 void
1431 shared_region_dump_conflict_info(
1432 int trace_level,
1433 vm_map_t map,
1434 vm_map_offset_t offset,
1435 vm_map_size_t size)
1436 {
1437 vm_map_entry_t entry;
1438 vm_object_t object;
1439 memory_object_t mem_object;
1440 kern_return_t kr;
1441 char *filename;
1442
1443 if (trace_level > shared_region_trace_level) {
1444 return;
1445 }
1446
1447 object = VM_OBJECT_NULL;
1448
1449 vm_map_lock_read(map);
1450 if (!vm_map_lookup_entry(map, offset, &entry)) {
1451 entry = entry->vme_next;
1452 }
1453
1454 if (entry != vm_map_to_entry(map)) {
1455 if (entry->is_sub_map) {
1456 printf("shared region: %p: conflict with submap "
1457 "at 0x%llx size 0x%llx !?\n",
1458 current_thread(),
1459 (uint64_t) offset,
1460 (uint64_t) size);
1461 goto done;
1462 }
1463
1464 object = entry->object.vm_object;
1465 if (object == VM_OBJECT_NULL) {
1466 printf("shared region: %p: conflict with NULL object "
1467 "at 0x%llx size 0x%llx !?\n",
1468 current_thread(),
1469 (uint64_t) offset,
1470 (uint64_t) size);
1471 object = VM_OBJECT_NULL;
1472 goto done;
1473 }
1474
1475 vm_object_lock(object);
1476 while (object->shadow != VM_OBJECT_NULL) {
1477 vm_object_t shadow;
1478
1479 shadow = object->shadow;
1480 vm_object_lock(shadow);
1481 vm_object_unlock(object);
1482 object = shadow;
1483 }
1484
1485 if (object->internal) {
1486 printf("shared region: %p: conflict with anonymous "
1487 "at 0x%llx size 0x%llx\n",
1488 current_thread(),
1489 (uint64_t) offset,
1490 (uint64_t) size);
1491 goto done;
1492 }
1493 if (! object->pager_ready) {
1494 printf("shared region: %p: conflict with uninitialized "
1495 "at 0x%llx size 0x%llx\n",
1496 current_thread(),
1497 (uint64_t) offset,
1498 (uint64_t) size);
1499 goto done;
1500 }
1501
1502 mem_object = object->pager;
1503
1504 /*
1505 * XXX FBDP: "!internal" doesn't mean it's a vnode pager...
1506 */
1507 kr = vnode_pager_get_object_filename(mem_object,
1508 &filename);
1509 if (kr != KERN_SUCCESS) {
1510 filename = NULL;
1511 }
1512 printf("shared region: %p: conflict with '%s' "
1513 "at 0x%llx size 0x%llx\n",
1514 current_thread(),
1515 filename ? filename : "<unknown>",
1516 (uint64_t) offset,
1517 (uint64_t) size);
1518 }
1519 done:
1520 if (object != VM_OBJECT_NULL) {
1521 vm_object_unlock(object);
1522 }
1523 vm_map_unlock_read(map);
1524 }
1525
1526 /*
1527 * map_shared_file:
1528 *
1529 * Attempt to map a split library into the shared region. Check if the mappings
1530 * are already in place.
1531 */
1532 kern_return_t
1533 map_shared_file(
1534 int map_cnt,
1535 struct shared_file_mapping_np *mappings,
1536 memory_object_control_t file_control,
1537 memory_object_size_t file_size,
1538 shared_region_task_mappings_t sm_info,
1539 mach_vm_offset_t base_offset,
1540 mach_vm_offset_t *slide_p)
1541 {
1542 vm_object_t file_object;
1543 shared_file_info_t *shared_file_header;
1544 load_struct_t *file_entry;
1545 loaded_mapping_t *file_mapping;
1546 int i;
1547 kern_return_t ret;
1548 mach_vm_offset_t slide;
1549
1550 SHARED_REGION_DEBUG(("map_shared_file()\n"));
1551
1552 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1553
1554 mutex_lock(&shared_file_header->lock);
1555
1556 /* If this is the first call to this routine, take the opportunity */
1557 /* to initialize the hash table which will be used to look-up */
1558 /* mappings based on the file object */
1559
1560 if(shared_file_header->hash_init == FALSE) {
1561 ret = shared_file_header_init(shared_file_header);
1562 if (ret != KERN_SUCCESS) {
1563 SHARED_REGION_TRACE(
1564 SHARED_REGION_TRACE_ERROR,
1565 ("shared_region: %p: map_shared_file: "
1566 "shared_file_header_init() failed kr=0x%x\n",
1567 current_thread(), ret));
1568 mutex_unlock(&shared_file_header->lock);
1569 return KERN_NO_SPACE;
1570 }
1571 }
1572
1573
1574 /* Find the entry in the map associated with the current mapping */
1575 /* of the file object */
1576 file_object = memory_object_control_to_vm_object(file_control);
1577
1578 file_entry = lsf_hash_lookup(shared_file_header->hash,
1579 (void *) file_object,
1580 mappings[0].sfm_file_offset,
1581 shared_file_header->hash_size,
1582 TRUE, TRUE, sm_info);
1583 if (file_entry) {
1584 /* File is loaded, check the load manifest for exact match */
1585 /* we simplify by requiring that the elements be the same */
1586 /* size and in the same order rather than checking for */
1587 /* semantic equivalence. */
1588
1589 i = 0;
1590 file_mapping = file_entry->mappings;
1591 while(file_mapping != NULL) {
1592 if(i>=map_cnt) {
1593 SHARED_REGION_TRACE(
1594 SHARED_REGION_TRACE_CONFLICT,
1595 ("shared_region: %p: map_shared_file: "
1596 "already mapped with "
1597 "more than %d mappings\n",
1598 current_thread(), map_cnt));
1599 shared_region_dump_file_entry(
1600 SHARED_REGION_TRACE_INFO,
1601 file_entry);
1602 shared_region_dump_mappings(
1603 SHARED_REGION_TRACE_INFO,
1604 mappings, map_cnt, base_offset);
1605
1606 mutex_unlock(&shared_file_header->lock);
1607 return KERN_INVALID_ARGUMENT;
1608 }
1609 if(((mappings[i].sfm_address)
1610 & SHARED_DATA_REGION_MASK) !=
1611 file_mapping->mapping_offset ||
1612 mappings[i].sfm_size != file_mapping->size ||
1613 mappings[i].sfm_file_offset != file_mapping->file_offset ||
1614 mappings[i].sfm_init_prot != file_mapping->protection) {
1615 SHARED_REGION_TRACE(
1616 SHARED_REGION_TRACE_CONFLICT,
1617 ("shared_region: %p: "
1618 "mapping #%d differs\n",
1619 current_thread(), i));
1620 shared_region_dump_file_entry(
1621 SHARED_REGION_TRACE_INFO,
1622 file_entry);
1623 shared_region_dump_mappings(
1624 SHARED_REGION_TRACE_INFO,
1625 mappings, map_cnt, base_offset);
1626
1627 break;
1628 }
1629 file_mapping = file_mapping->next;
1630 i++;
1631 }
1632 if(i!=map_cnt) {
1633 SHARED_REGION_TRACE(
1634 SHARED_REGION_TRACE_CONFLICT,
1635 ("shared_region: %p: map_shared_file: "
1636 "already mapped with "
1637 "%d mappings instead of %d\n",
1638 current_thread(), i, map_cnt));
1639 shared_region_dump_file_entry(
1640 SHARED_REGION_TRACE_INFO,
1641 file_entry);
1642 shared_region_dump_mappings(
1643 SHARED_REGION_TRACE_INFO,
1644 mappings, map_cnt, base_offset);
1645
1646 mutex_unlock(&shared_file_header->lock);
1647 return KERN_INVALID_ARGUMENT;
1648 }
1649
1650 slide = file_entry->base_address - base_offset;
1651 if (slide_p != NULL) {
1652 /*
1653 * File already mapped but at different address,
1654 * and the caller is OK with the sliding.
1655 */
1656 *slide_p = slide;
1657 ret = KERN_SUCCESS;
1658 } else {
1659 /*
1660 * The caller doesn't want any sliding. The file needs
1661 * to be mapped at the requested address or not mapped.
1662 */
1663 if (slide != 0) {
1664 /*
1665 * The file is already mapped but at a different
1666 * address.
1667 * We fail.
1668 * XXX should we attempt to load at
1669 * requested address too ?
1670 */
1671 ret = KERN_FAILURE;
1672 SHARED_REGION_TRACE(
1673 SHARED_REGION_TRACE_CONFLICT,
1674 ("shared_region: %p: "
1675 "map_shared_file: already mapped, "
1676 "would need to slide 0x%llx\n",
1677 current_thread(),
1678 slide));
1679 } else {
1680 /*
1681 * The file is already mapped at the correct
1682 * address.
1683 * We're done !
1684 */
1685 ret = KERN_SUCCESS;
1686 }
1687 }
1688 mutex_unlock(&shared_file_header->lock);
1689 return ret;
1690 } else {
1691 /* File is not loaded, let's attempt to load it */
1692 ret = lsf_map(mappings, map_cnt,
1693 (void *)file_control,
1694 file_size,
1695 sm_info,
1696 base_offset,
1697 slide_p);
1698 if(ret == KERN_NO_SPACE) {
1699 shared_region_mapping_t regions;
1700 shared_region_mapping_t system_region;
1701 regions = (shared_region_mapping_t)sm_info->self;
1702 regions->flags |= SHARED_REGION_FULL;
1703 system_region = lookup_default_shared_region(
1704 regions->fs_base, regions->system);
1705 if (system_region == regions) {
1706 shared_region_mapping_t new_system_shared_region;
1707 shared_file_boot_time_init(
1708 regions->fs_base, regions->system);
1709 /* current task must stay with its current */
1710 /* regions, drop count on system_shared_region */
1711 /* and put back our original set */
1712 vm_get_shared_region(current_task(),
1713 &new_system_shared_region);
1714 shared_region_mapping_dealloc_lock(
1715 new_system_shared_region, 0, 1);
1716 vm_set_shared_region(current_task(), regions);
1717 } else if (system_region != NULL) {
1718 shared_region_mapping_dealloc_lock(
1719 system_region, 0, 1);
1720 }
1721 }
1722 mutex_unlock(&shared_file_header->lock);
1723 return ret;
1724 }
1725 }
1726
1727 /*
1728 * shared_region_cleanup:
1729 *
1730 * Deallocates all the mappings in the shared region, except those explicitly
1731 * specified in the "ranges" set of address ranges.
1732 */
1733 kern_return_t
1734 shared_region_cleanup(
1735 unsigned int range_count,
1736 struct shared_region_range_np *ranges,
1737 shared_region_task_mappings_t sm_info)
1738 {
1739 kern_return_t kr;
1740 ipc_port_t region_handle;
1741 vm_named_entry_t region_named_entry;
1742 vm_map_t text_submap, data_submap, submap, next_submap;
1743 unsigned int i_range;
1744 vm_map_offset_t range_start, range_end;
1745 vm_map_offset_t submap_base, submap_end, submap_offset;
1746 vm_map_size_t delete_size;
1747
1748 struct shared_region_range_np tmp_range;
1749 unsigned int sort_index, sorted_index;
1750 vm_map_offset_t sort_min_address;
1751 unsigned int sort_min_index;
1752
1753 /*
1754 * Since we want to deallocate the holes between the "ranges",
1755 * sort the array by increasing addresses.
1756 */
1757 for (sorted_index = 0;
1758 sorted_index < range_count;
1759 sorted_index++) {
1760
1761 /* first remaining entry is our new starting point */
1762 sort_min_index = sorted_index;
1763 sort_min_address = ranges[sort_min_index].srr_address;
1764
1765 /* find the lowest mapping_offset in the remaining entries */
1766 for (sort_index = sorted_index + 1;
1767 sort_index < range_count;
1768 sort_index++) {
1769 if (ranges[sort_index].srr_address < sort_min_address) {
1770 /* lowest address so far... */
1771 sort_min_index = sort_index;
1772 sort_min_address =
1773 ranges[sort_min_index].srr_address;
1774 }
1775 }
1776
1777 if (sort_min_index != sorted_index) {
1778 /* swap entries */
1779 tmp_range = ranges[sort_min_index];
1780 ranges[sort_min_index] = ranges[sorted_index];
1781 ranges[sorted_index] = tmp_range;
1782 }
1783 }
1784
1785 region_handle = (ipc_port_t) sm_info->text_region;
1786 region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
1787 text_submap = region_named_entry->backing.map;
1788
1789 region_handle = (ipc_port_t) sm_info->data_region;
1790 region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
1791 data_submap = region_named_entry->backing.map;
1792
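/* walk the text submap first; we switch to the data submap once we pass its end */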
1793 submap = text_submap;
1794 next_submap = submap;
1795 submap_base = sm_info->client_base;
1796 submap_offset = 0;
1797 submap_end = submap_base + sm_info->text_size;
1798 for (i_range = 0;
1799 i_range < range_count;
1800 i_range++) {
1801
1802 /* get the next range of addresses to keep */
1803 range_start = ranges[i_range].srr_address;
1804 range_end = range_start + ranges[i_range].srr_size;
1805 /* align them to page boundaries */
1806 range_start = vm_map_trunc_page(range_start);
1807 range_end = vm_map_round_page(range_end);
1808
1809 /* make sure we don't go beyond the submap's boundaries */
1810 if (range_start < submap_base) {
1811 range_start = submap_base;
1812 } else if (range_start >= submap_end) {
1813 range_start = submap_end;
1814 }
1815 if (range_end < submap_base) {
1816 range_end = submap_base;
1817 } else if (range_end >= submap_end) {
1818 range_end = submap_end;
1819 }
1820
1821 if (range_start > submap_base + submap_offset) {
1822 /*
1823 * Deallocate everything between the last offset in the
1824 * submap and the start of this range.
1825 */
1826 delete_size = range_start -
1827 (submap_base + submap_offset);
1828 (void) vm_deallocate(submap,
1829 submap_offset,
1830 delete_size);
1831 } else {
1832 delete_size = 0;
1833 }
1834
1835 /* skip to the end of the range */
1836 submap_offset += delete_size + (range_end - range_start);
1837
1838 if (submap_base + submap_offset >= submap_end) {
1839 /* get to next submap */
1840
1841 if (submap == data_submap) {
1842 /* no other submap after data: done ! */
1843 break;
1844 }
1845
1846 /* get original range again */
1847 range_start = ranges[i_range].srr_address;
1848 range_end = range_start + ranges[i_range].srr_size;
1849 range_start = vm_map_trunc_page(range_start);
1850 range_end = vm_map_round_page(range_end);
1851
1852 if (range_end > submap_end) {
1853 /*
1854 * This last range overlaps with the next
1855 * submap. We need to process it again
1856 * after switching submaps. Otherwise, we'll
1857 * just continue with the next range.
1858 */
1859 i_range--;
1860 }
1861
1862 if (submap == text_submap) {
1863 /*
1864 * Switch to the data submap.
1865 */
1866 submap = data_submap;
1867 submap_offset = 0;
1868 submap_base = sm_info->client_base +
1869 sm_info->text_size;
1870 submap_end = submap_base + sm_info->data_size;
1871 }
1872 }
1873 }
1874
1875 if (submap_base + submap_offset < submap_end) {
1876 /* delete remainder of this submap, from "offset" to the end */
1877 (void) vm_deallocate(submap,
1878 submap_offset,
1879 submap_end - submap_base - submap_offset);
1880 /* if nothing to keep in data submap, delete it all */
1881 if (submap == text_submap) {
1882 submap = data_submap;
1883 submap_offset = 0;
1884 submap_base = sm_info->client_base + sm_info->text_size;
1885 submap_end = submap_base + sm_info->data_size;
1886 (void) vm_deallocate(data_submap,
1887 0,
1888 submap_end - submap_base);
1889 }
1890 }
1891
1892 kr = KERN_SUCCESS;
1893 return kr;
1894 }
1895
1896 /* A hash lookup function for the list of loaded files in */
1897 /* shared_memory_server space. */
1898
1899 static load_struct_t *
1900 lsf_hash_lookup(
1901 queue_head_t *hash_table,
1902 void *file_object,
1903 vm_offset_t recognizableOffset,
1904 int size,
1905 boolean_t regular,
1906 boolean_t alternate,
1907 shared_region_task_mappings_t sm_info)
1908 {
1909 register queue_t bucket;
1910 load_struct_t *entry;
1911 shared_region_mapping_t target_region;
1912 int depth;
1913
1914 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1915 "reg=%d alt=%d sm_info=%p\n",
1916 hash_table, file_object, recognizableOffset, size,
1917 regular, alternate, sm_info));
1918
1919 bucket = &(hash_table[load_file_hash((int)file_object, size)]);
1920 for (entry = (load_struct_t *)queue_first(bucket);
1921 !queue_end(bucket, &entry->links);
1922 entry = (load_struct_t *)queue_next(&entry->links)) {
1923
1924 if ((entry->file_object == (int)file_object) &&
1925 (entry->file_offset == recognizableOffset)) {
1926 target_region = (shared_region_mapping_t)sm_info->self;
1927 depth = target_region->depth;
1928 while(target_region) {
1929 if((!(sm_info->self)) ||
1930 ((target_region == entry->regions_instance) &&
1931 (target_region->depth >= entry->depth))) {
1932 if(alternate &&
1933 entry->base_address >= sm_info->alternate_base) {
1934 LSF_DEBUG(("lsf_hash_lookup: "
1935 "alt=%d found entry %p "
1936 "(base=0x%x "
1937 "alt_base=0x%x)\n",
1938 alternate, entry,
1939 entry->base_address,
1940 sm_info->alternate_base));
1941 return entry;
1942 }
1943 if (regular &&
1944 entry->base_address < sm_info->alternate_base) {
1945 LSF_DEBUG(("lsf_hash_lookup: "
1946 "reg=%d found entry %p "
1947 "(base=0x%x "
1948 "alt_base=0x%x)\n",
1949 regular, entry,
1950 entry->base_address,
1951 sm_info->alternate_base));
1952 return entry;
1953 }
1954 }
1955 if(target_region->object_chain) {
1956 target_region = (shared_region_mapping_t)
1957 target_region->object_chain->object_chain_region;
1958 depth = target_region->object_chain->depth;
1959 } else {
1960 target_region = NULL;
1961 }
1962 }
1963 }
1964 }
1965
1966 LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
1967 "reg=%d alt=%d sm_info=%p NOT FOUND\n",
1968 hash_table, file_object, recognizableOffset, size,
1969 regular, alternate, sm_info));
1970 return (load_struct_t *)0;
1971 }
1972
1973 __private_extern__ load_struct_t *
1974 lsf_remove_regions_mappings_lock(
1975 shared_region_mapping_t region,
1976 shared_region_task_mappings_t sm_info,
1977 int need_sfh_lock)
1978 {
1979 int i;
1980 register queue_t bucket;
1981 shared_file_info_t *shared_file_header;
1982 load_struct_t *entry;
1983 load_struct_t *next_entry;
1984
1985 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
1986
1987 LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
1988 "sfh=%p\n",
1989 region, sm_info, shared_file_header));
1990 if (need_sfh_lock)
1991 mutex_lock(&shared_file_header->lock);
1992 if(shared_file_header->hash_init == FALSE) {
1993 if (need_sfh_lock)
1994 mutex_unlock(&shared_file_header->lock);
1995 LSF_DEBUG(("lsf_remove_regions_mappings_lock"
1996 "(region=%p,sm_info=%p): not inited\n",
1997 region, sm_info));
1998 return NULL;
1999 }
2000 for(i = 0; i<shared_file_header->hash_size; i++) {
2001 bucket = &shared_file_header->hash[i];
2002 for (entry = (load_struct_t *)queue_first(bucket);
2003 !queue_end(bucket, &entry->links);) {
2004 next_entry = (load_struct_t *)queue_next(&entry->links);
2005 if(region == entry->regions_instance) {
2006 LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
2007 "entry %p region %p: "
2008 "unloading\n",
2009 entry, region));
2010 lsf_unload((void *)entry->file_object,
2011 entry->base_address, sm_info);
2012 } else {
2013 LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
2014 "entry %p region %p target region %p: "
2015 "not unloading\n",
2016 entry, entry->regions_instance, region));
2017 }
2018
2019 entry = next_entry;
2020 }
2021 }
2022 if (need_sfh_lock)
2023 mutex_unlock(&shared_file_header->lock);
2024 LSF_DEBUG(("lsf_remove_regions_mappings_lock done\n"));
2025
2026 return NULL; /* XXX */
2027 }
2028
2029 /*
2030 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
2031 * only caller. Remove this stub function and the corresponding symbol
2032 * export for Merlot.
2033 */
2034 load_struct_t *
2035 lsf_remove_regions_mappings(
2036 shared_region_mapping_t region,
2037 shared_region_task_mappings_t sm_info)
2038 {
2039 return lsf_remove_regions_mappings_lock(region, sm_info, 1);
2040 }
2041
2042 /* Removes a map_list (a list of loaded extents) for a file from */
2043 /* the loaded file hash table. */
2044
2045 static load_struct_t *
2046 lsf_hash_delete(
2047 load_struct_t *target_entry, /* optional: NULL if not relevant */
2048 void *file_object,
2049 vm_offset_t base_offset,
2050 shared_region_task_mappings_t sm_info)
2051 {
2052 register queue_t bucket;
2053 shared_file_info_t *shared_file_header;
2054 load_struct_t *entry;
2055
2056 LSF_DEBUG(("lsf_hash_delete(target=%p,file=%p,base=0x%x,sm_info=%p)\n",
2057 target_entry, file_object, base_offset, sm_info));
2058
2059 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
2060
2061 bucket = &shared_file_header->hash
2062 [load_file_hash((int)file_object, shared_file_header->hash_size)];
2063
2064 for (entry = (load_struct_t *)queue_first(bucket);
2065 !queue_end(bucket, &entry->links);
2066 entry = (load_struct_t *)queue_next(&entry->links)) {
2067 if((!(sm_info->self)) || ((shared_region_mapping_t)
2068 sm_info->self == entry->regions_instance)) {
2069 if ((target_entry == NULL ||
2070 entry == target_entry) &&
2071 (entry->file_object == (int) file_object) &&
2072 (entry->base_address == base_offset)) {
2073 queue_remove(bucket, entry,
2074 load_struct_ptr_t, links);
2075 LSF_DEBUG(("lsf_hash_delete: found it\n"));
2076 return entry;
2077 }
2078 }
2079 }
2080
2081 LSF_DEBUG(("lsf_hash_delete: not found\n"));
2082 return (load_struct_t *)0;
2083 }
2084
2085 /* Inserts a new map_list (a list of loaded file extents) into the */
2086 /* server loaded file hash table. */
2087
2088 static void
2089 lsf_hash_insert(
2090 load_struct_t *entry,
2091 shared_region_task_mappings_t sm_info)
2092 {
2093 shared_file_info_t *shared_file_header;
2094
2095 LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
2096 entry, sm_info, entry->file_object, entry->base_address));
2097
2098 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
2099 queue_enter(&shared_file_header->hash
2100 [load_file_hash(entry->file_object,
2101 shared_file_header->hash_size)],
2102 entry, load_struct_ptr_t, links);
2103 }
2104
2105 /* Looks up the requested file. If it is already loaded and the */
2106 /* file extents are an exact match, returns success. If it is not */
2107 /* loaded, attempts to load the file extents at the given offsets. */
2108 /* If any extent fails to load, or if the file was already loaded */
2109 /* in a different configuration, lsf_load fails. */
2110
2111 static kern_return_t
2112 lsf_load(
2113 vm_offset_t mapped_file,
2114 vm_size_t mapped_file_size,
2115 vm_offset_t *base_address,
2116 sf_mapping_t *mappings,
2117 int map_cnt,
2118 void *file_object,
2119 int flags,
2120 shared_region_task_mappings_t sm_info)
2121 {
2122
2123 load_struct_t *entry;
2124 vm_map_copy_t copy_object;
2125 loaded_mapping_t *file_mapping;
2126 loaded_mapping_t **tptr;
2127 int i;
2128 ipc_port_t local_map;
2129 vm_offset_t original_alt_load_next;
2130 vm_offset_t alternate_load_next;
2131
2132 LSF_DEBUG(("lsf_load"
2133 "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p)"
2134 "\n",
2135 mapped_file_size, *base_address, map_cnt, file_object,
2136 flags, sm_info));
2137 entry = (load_struct_t *)zalloc(lsf_zone);
2138 LSF_ALLOC_DEBUG(("lsf_load: entry=%p map_cnt=%d\n", entry, map_cnt));
2139 LSF_DEBUG(("lsf_load"
2140 "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p) "
2141 "entry=%p\n",
2142 mapped_file_size, *base_address, map_cnt, file_object,
2143 flags, sm_info, entry));
2144 if (entry == NULL) {
2145 printf("lsf_load: unable to allocate memory\n");
2146 return KERN_NO_SPACE;
2147 }
2148
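/* one zone element consumed for this entry; adjust the pool accounting */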
2149 shared_file_available_hash_ele--;
2150 entry->file_object = (int)file_object;
2151 entry->mapping_cnt = map_cnt;
2152 entry->mappings = NULL;
2153 entry->links.prev = (queue_entry_t) 0;
2154 entry->links.next = (queue_entry_t) 0;
2155 entry->regions_instance = (shared_region_mapping_t)sm_info->self;
2156 entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
2157 entry->file_offset = mappings[0].file_offset;
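/* the first mapping's file offset is the "recognizableOffset" key used by lsf_hash_lookup */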
2158
2159 lsf_hash_insert(entry, sm_info);
2160 tptr = &(entry->mappings);
2161
2162
2163 alternate_load_next = sm_info->alternate_next;
2164 original_alt_load_next = alternate_load_next;
2165 if (flags & ALTERNATE_LOAD_SITE) {
2166 vm_offset_t max_loadfile_offset;
2167
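/*
 * Rebase the file at the current alternate-area cursor, then check
 * that its highest extent still fits (a small tail of the area is
 * kept in reserve); otherwise report KERN_NO_SPACE.
 */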
2168 *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
2169 sm_info->alternate_next;
2170 max_loadfile_offset = 0;
2171 for(i = 0; i<map_cnt; i++) {
2172 if(((mappings[i].mapping_offset
2173 & SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
2174 max_loadfile_offset) {
2175 max_loadfile_offset =
2176 (mappings[i].mapping_offset
2177 & SHARED_TEXT_REGION_MASK)
2178 + mappings[i].size;
2179 }
2180 }
2181 if((alternate_load_next + round_page(max_loadfile_offset)) >=
2182 (sm_info->data_size - (sm_info->data_size>>9))) {
2183 entry->base_address =
2184 (*base_address) & SHARED_TEXT_REGION_MASK;
2185 lsf_unload(file_object, entry->base_address, sm_info);
2186
2187 return KERN_NO_SPACE;
2188 }
2189 alternate_load_next += round_page(max_loadfile_offset);
2190
2191 } else {
2192 if (((*base_address) & SHARED_TEXT_REGION_MASK) >
2193 sm_info->alternate_base) {
2194 entry->base_address =
2195 (*base_address) & SHARED_TEXT_REGION_MASK;
2196 lsf_unload(file_object, entry->base_address, sm_info);
2197 return KERN_INVALID_ARGUMENT;
2198 }
2199 }
2200
2201 entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;
2202
2203 // Sanity check the mappings -- make sure we don't stray across the
2204 // alternate boundary. If any bit of a library that we're not trying
2205 // to load in the alternate load space strays across that boundary,
2206 // return KERN_INVALID_ARGUMENT immediately so that the caller can
2207 // try to load it in the alternate shared area. We do this to avoid
2208 // a nasty case: if a library tries to load so that it crosses the
2209 // boundary, it'll occupy a bit of the alternate load area without
2210 // the kernel being aware. When a later load into the alternate load
2211 // area is attempted at the first free address, it will fail.
2212 // Thus, a single library straddling the boundary causes all sliding
2213 // libraries to fail to load. This check will avoid such a case.
2214
2215 if (!(flags & ALTERNATE_LOAD_SITE)) {
2216 for (i = 0; i<map_cnt;i++) {
2217 vm_offset_t region_mask;
2218 vm_address_t region_start;
2219 vm_address_t region_end;
2220
2221 if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
2222 // mapping offsets are relative to start of shared segments.
2223 region_mask = SHARED_TEXT_REGION_MASK;
2224 region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
2225 region_end = (mappings[i].size + region_start);
2226 if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
2227 // No library is permitted to load so that any bit of it falls in
2228 // the shared alternate space. If they want it loaded there, they
2229 // can put it in the alternate space explicitly.
2230 printf("Library trying to load across alternate shared region boundary -- denied!\n");
2231 lsf_unload(file_object, entry->base_address, sm_info);
2232 return KERN_INVALID_ARGUMENT;
2233 }
2234 } else {
2235 // rw section?
2236 region_mask = SHARED_DATA_REGION_MASK;
2237 region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
2238 region_end = (mappings[i].size + region_start);
2239 if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
2240 printf("Library trying to load across alternate shared region boundary-- denied!\n");
2241 lsf_unload(file_object, entry->base_address, sm_info);
2242 return KERN_INVALID_ARGUMENT;
2243 }
2244 } // write?
2245 } // for
2246 } // if not alternate load site.
2247
2248 /* copyin mapped file data */
2249 for(i = 0; i<map_cnt; i++) {
2250 vm_offset_t target_address;
2251 vm_offset_t region_mask;
2252
2253 if(mappings[i].protection & VM_PROT_COW) {
2254 local_map = (ipc_port_t)sm_info->data_region;
2255 region_mask = SHARED_DATA_REGION_MASK;
2256 if((mappings[i].mapping_offset
2257 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
2258 lsf_unload(file_object,
2259 entry->base_address, sm_info);
2260 return KERN_INVALID_ARGUMENT;
2261 }
2262 } else {
2263 region_mask = SHARED_TEXT_REGION_MASK;
2264 local_map = (ipc_port_t)sm_info->text_region;
2265 if(mappings[i].mapping_offset
2266 & GLOBAL_SHARED_SEGMENT_MASK) {
2267 lsf_unload(file_object,
2268 entry->base_address, sm_info);
2269 return KERN_INVALID_ARGUMENT;
2270 }
2271 }
2272 if(!(mappings[i].protection & VM_PROT_ZF)
2273 && ((mapped_file + mappings[i].file_offset +
2274 mappings[i].size) >
2275 (mapped_file + mapped_file_size))) {
2276 lsf_unload(file_object, entry->base_address, sm_info);
2277 return KERN_INVALID_ARGUMENT;
2278 }
2279 target_address = ((mappings[i].mapping_offset) & region_mask)
2280 + entry->base_address;
2281 if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
2282 ->backing.map, &target_address,
2283 mappings[i].size, VM_FLAGS_FIXED)) {
2284 lsf_unload(file_object, entry->base_address, sm_info);
2285 return KERN_FAILURE;
2286 }
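/* recompute the target address; vm_allocate() with VM_FLAGS_FIXED leaves it unchanged */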
2287 target_address = ((mappings[i].mapping_offset) & region_mask)
2288 + entry->base_address;
2289 if(!(mappings[i].protection & VM_PROT_ZF)) {
2290 if(vm_map_copyin(current_map(),
2291 (vm_map_address_t)(mapped_file + mappings[i].file_offset),
2292 vm_map_round_page(mappings[i].size), FALSE, &copy_object)) {
2293 vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
2294 ->backing.map, target_address, mappings[i].size);
2295 lsf_unload(file_object, entry->base_address, sm_info);
2296 return KERN_FAILURE;
2297 }
2298 if(vm_map_copy_overwrite(((vm_named_entry_t)
2299 local_map->ip_kobject)->backing.map,
2300 (vm_map_address_t)target_address,
2301 copy_object, FALSE)) {
2302 vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
2303 ->backing.map, target_address, mappings[i].size);
2304 lsf_unload(file_object, entry->base_address, sm_info);
2305 return KERN_FAILURE;
2306 }
2307 }
2308
2309 file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
2310 if (file_mapping == NULL) {
2311 lsf_unload(file_object, entry->base_address, sm_info);
2312 printf("lsf_load: unable to allocate memory\n");
2313 return KERN_NO_SPACE;
2314 }
2315 shared_file_available_hash_ele--;
2316 file_mapping->mapping_offset = (mappings[i].mapping_offset)
2317 & region_mask;
2318 file_mapping->size = mappings[i].size;
2319 file_mapping->file_offset = mappings[i].file_offset;
2320 file_mapping->protection = mappings[i].protection;
2321 file_mapping->next = NULL;
2322 LSF_DEBUG(("lsf_load: file_mapping %p "
2323 "for offset=0x%x size=0x%x\n",
2324 file_mapping, file_mapping->mapping_offset,
2325 file_mapping->size));
2326
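/*
 * Set the submap protections for this mapping: the first call
 * (set_max == TRUE) clamps the maximum protection, the second
 * (set_max == FALSE) sets the current protection.  VM_PROT_WRITE
 * is stripped in both cases, so the shared copy cannot be written
 * directly.
 */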
2327 vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
2328 ->backing.map, target_address,
2329 round_page(target_address + mappings[i].size),
2330 (mappings[i].protection &
2331 (VM_PROT_READ | VM_PROT_EXECUTE)),
2332 TRUE);
2333 vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
2334 ->backing.map, target_address,
2335 round_page(target_address + mappings[i].size),
2336 (mappings[i].protection &
2337 (VM_PROT_READ | VM_PROT_EXECUTE)),
2338 FALSE);
2339
2340 *tptr = file_mapping;
2341 tptr = &(file_mapping->next);
2342 }
2343 shared_region_mapping_set_alt_next(
2344 (shared_region_mapping_t) sm_info->self,
2345 alternate_load_next);
2346 LSF_DEBUG(("lsf_load: done\n"));
2347 return KERN_SUCCESS;
2348 }
2349
2350
2351 /*
2352 * lsf_slide:
2353 *
2354 * Look in the shared region, starting from the end, for a place to fit all the
2355 * mappings while respecting their relative offsets.
2356 */
2357 static kern_return_t
2358 lsf_slide(
2359 unsigned int map_cnt,
2360 struct shared_file_mapping_np *mappings_in,
2361 shared_region_task_mappings_t sm_info,
2362 mach_vm_offset_t *base_offset_p)
2363 {
2364 mach_vm_offset_t max_mapping_offset;
2365 int i;
2366 vm_map_entry_t map_entry, prev_entry, next_entry;
2367 mach_vm_offset_t prev_hole_start, prev_hole_end;
2368 mach_vm_offset_t mapping_offset, mapping_end_offset;
2369 mach_vm_offset_t base_offset;
2370 mach_vm_size_t mapping_size;
2371 mach_vm_offset_t wiggle_room, wiggle;
2372 vm_map_t text_map, data_map, map;
2373 vm_named_entry_t region_entry;
2374 ipc_port_t region_handle;
2375 kern_return_t kr;
2376
2377 struct shared_file_mapping_np *mappings, tmp_mapping;
2378 unsigned int sort_index, sorted_index;
2379 vm_map_offset_t sort_min_address;
2380 unsigned int sort_min_index;
2381
2382 /*
2383 * Sort the mappings array, so that we can try and fit them
2384 * in the right order as we progress along the VM maps.
2385 *
2386 * We can't modify the original array (the original order is
2387 * important when doing lookups of the mappings), so copy it first.
2388 */
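/*
 * The copy is selection-sorted by each mapping's end offset within
 * the text region (ascending); the highest end offset is recorded
 * along the way as max_mapping_offset.
 */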
2389
2390 kr = kmem_alloc(kernel_map,
2391 (vm_offset_t *) &mappings,
2392 (vm_size_t) (map_cnt * sizeof (mappings[0])));
2393 if (kr != KERN_SUCCESS) {
2394 return KERN_NO_SPACE;
2395 }
2396
2397 bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));
2398
2399 max_mapping_offset = 0;
2400 for (sorted_index = 0;
2401 sorted_index < map_cnt;
2402 sorted_index++) {
2403
2404 /* first remaining entry is our new starting point */
2405 sort_min_index = sorted_index;
2406 mapping_end_offset = ((mappings[sort_min_index].sfm_address &
2407 SHARED_TEXT_REGION_MASK) +
2408 mappings[sort_min_index].sfm_size);
2409 sort_min_address = mapping_end_offset;
2410 /* compute the highest mapping_offset as well... */
2411 if (mapping_end_offset > max_mapping_offset) {
2412 max_mapping_offset = mapping_end_offset;
2413 }
2414 /* find the lowest mapping_offset in the remaining entries */
2415 for (sort_index = sorted_index + 1;
2416 sort_index < map_cnt;
2417 sort_index++) {
2418
2419 mapping_end_offset =
2420 ((mappings[sort_index].sfm_address &
2421 SHARED_TEXT_REGION_MASK) +
2422 mappings[sort_index].sfm_size);
2423
2424 if (mapping_end_offset < sort_min_address) {
2425 /* lowest mapping_offset so far... */
2426 sort_min_index = sort_index;
2427 sort_min_address = mapping_end_offset;
2428 }
2429 }
2430 if (sort_min_index != sorted_index) {
2431 /* swap entries */
2432 tmp_mapping = mappings[sort_min_index];
2433 mappings[sort_min_index] = mappings[sorted_index];
2434 mappings[sorted_index] = tmp_mapping;
2435 }
2436
2437 }
2438
2439 max_mapping_offset = vm_map_round_page(max_mapping_offset);
2440
2441 /* start from the end of the shared area */
2442 base_offset = sm_info->text_size;
2443
2444 /* can all the mappings fit ? */
2445 if (max_mapping_offset > base_offset) {
2446 kmem_free(kernel_map,
2447 (vm_offset_t) mappings,
2448 map_cnt * sizeof (mappings[0]));
2449 return KERN_FAILURE;
2450 }
2451
2452 /*
2453 * Align the last mapping to the end of the submaps
2454 * and start from there.
2455 */
2456 base_offset -= max_mapping_offset;
2457
2458 region_handle = (ipc_port_t) sm_info->text_region;
2459 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2460 text_map = region_entry->backing.map;
2461
2462 region_handle = (ipc_port_t) sm_info->data_region;
2463 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2464 data_map = region_entry->backing.map;
2465
2466 vm_map_lock_read(text_map);
2467 vm_map_lock_read(data_map);
2468
2469 start_over:
2470 /*
2471 * At first, we can wiggle all the way from our starting point
2472 * (base_offset) towards the start of the map (0), if needed.
2473 */
2474 wiggle_room = base_offset;
2475
2476 for (i = (signed) map_cnt - 1; i >= 0; i--) {
2477 if (mappings[i].sfm_size == 0) {
2478 /* nothing to map here... */
2479 continue;
2480 }
2481 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
2482 /* copy-on-write mappings are in the data submap */
2483 map = data_map;
2484 } else {
2485 /* other mappings are in the text submap */
2486 map = text_map;
2487 }
2488 /* get the offset within the appropriate submap */
2489 mapping_offset = (mappings[i].sfm_address &
2490 SHARED_TEXT_REGION_MASK);
2491 mapping_size = mappings[i].sfm_size;
2492 mapping_end_offset = mapping_offset + mapping_size;
2493 mapping_offset = vm_map_trunc_page(mapping_offset);
2494 mapping_end_offset = vm_map_round_page(mapping_end_offset);
2495 mapping_size = mapping_end_offset - mapping_offset;
2496
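/*
 * Look for a hole that can hold this mapping at the current
 * base_offset, sliding base_offset further down (within the
 * remaining wiggle_room) until the mapping's pages fall entirely
 * inside a hole.
 */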
2497 for (;;) {
2498 if (vm_map_lookup_entry(map,
2499 base_offset + mapping_offset,
2500 &map_entry)) {
2501 /*
2502 * The start address for that mapping
2503 * is already mapped: no fit.
2504 * Locate the hole immediately before this map
2505 * entry.
2506 */
2507 prev_hole_end = map_entry->vme_start;
2508 prev_entry = map_entry->vme_prev;
2509 if (prev_entry == vm_map_to_entry(map)) {
2510 /* no previous entry */
2511 prev_hole_start = map->min_offset;
2512 } else {
2513 /* previous entry ends here */
2514 prev_hole_start = prev_entry->vme_end;
2515 }
2516 } else {
2517 /*
2518 * The start address for that mapping is not
2519 * mapped.
2520 * Locate the start and end of the hole
2521 * at that location.
2522 */
2523 /* map_entry is the previous entry */
2524 if (map_entry == vm_map_to_entry(map)) {
2525 /* no previous entry */
2526 prev_hole_start = map->min_offset;
2527 } else {
2528 /* previous entry ends there */
2529 prev_hole_start = map_entry->vme_end;
2530 }
2531 next_entry = map_entry->vme_next;
2532 if (next_entry == vm_map_to_entry(map)) {
2533 /* no next entry */
2534 prev_hole_end = map->max_offset;
2535 } else {
2536 prev_hole_end = next_entry->vme_start;
2537 }
2538 }
2539
2540 if (prev_hole_end <= base_offset + mapping_offset) {
2541 /* hole is to our left: try and wiggle to fit */
2542 wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
2543 if (wiggle > base_offset) {
2544 /* we're getting out of the map */
2545 kr = KERN_FAILURE;
2546 goto done;
2547 }
2548 base_offset -= wiggle;
2549 if (wiggle > wiggle_room) {
2550 /* can't wiggle that much: start over */
2551 goto start_over;
2552 }
2553 /* account for the wiggling done */
2554 wiggle_room -= wiggle;
2555 }
2556
2557 if (prev_hole_end >
2558 base_offset + mapping_offset + mapping_size) {
2559 /*
2560 * The hole extends further to the right
2561 * than what we need. Ignore the extra space.
2562 */
2563 prev_hole_end = (base_offset + mapping_offset +
2564 mapping_size);
2565 }
2566
2567 if (prev_hole_end <
2568 base_offset + mapping_offset + mapping_size) {
2569 /*
2570 * The hole is not big enough to establish
2571 * the mapping right there: wiggle towards
2572 * the beginning of the hole so that the end
2573 * of our mapping fits in the hole...
2574 */
2575 wiggle = base_offset + mapping_offset
2576 + mapping_size - prev_hole_end;
2577 if (wiggle > base_offset) {
2578 /* we're getting out of the map */
2579 kr = KERN_FAILURE;
2580 goto done;
2581 }
2582 base_offset -= wiggle;
2583 if (wiggle > wiggle_room) {
2584 /* can't wiggle that much: start over */
2585 goto start_over;
2586 }
2587 /* account for the wiggling done */
2588 wiggle_room -= wiggle;
2589
2590 /* keep searching from this new base */
2591 continue;
2592 }
2593
2594 if (prev_hole_start > base_offset + mapping_offset) {
2595 /* no hole found: keep looking */
2596 continue;
2597 }
2598
2599 /* compute wiggling room at this hole */
2600 wiggle = base_offset + mapping_offset - prev_hole_start;
2601 if (wiggle < wiggle_room) {
2602 /* less wiggle room than before... */
2603 wiggle_room = wiggle;
2604 }
2605
2606 /* found a hole that fits: skip to next mapping */
2607 break;
2608 } /* while we look for a hole */
2609 } /* for each mapping */
2610
2611 *base_offset_p = base_offset;
2612 kr = KERN_SUCCESS;
2613
2614 done:
2615 vm_map_unlock_read(text_map);
2616 vm_map_unlock_read(data_map);
2617
2618 kmem_free(kernel_map,
2619 (vm_offset_t) mappings,
2620 map_cnt * sizeof (mappings[0]));
2621
2622 return kr;
2623 }
2624
2625 /*
2626 * lsf_map:
2627 *
2628 * Attempt to establish the mappings for a split library into the shared region.
2629 */
2630 static kern_return_t
2631 lsf_map(
2632 struct shared_file_mapping_np *mappings,
2633 int map_cnt,
2634 void *file_control,
2635 memory_object_offset_t file_size,
2636 shared_region_task_mappings_t sm_info,
2637 mach_vm_offset_t base_offset,
2638 mach_vm_offset_t *slide_p)
2639 {
2640 load_struct_t *entry;
2641 loaded_mapping_t *file_mapping;
2642 loaded_mapping_t **tptr;
2643 ipc_port_t region_handle;
2644 vm_named_entry_t region_entry;
2645 mach_port_t map_port;
2646 vm_object_t file_object;
2647 kern_return_t kr;
2648 int i;
2649 mach_vm_offset_t original_base_offset;
2650 mach_vm_size_t total_size;
2651
2652 /* get the VM object from the file's memory object handle */
2653 file_object = memory_object_control_to_vm_object(file_control);
2654
2655 original_base_offset = base_offset;
2656
2657 LSF_DEBUG(("lsf_map"
2658 "(cnt=%d,file=%p,sm_info=%p)"
2659 "\n",
2660 map_cnt, file_object,
2661 sm_info));
2662
2663 restart_after_slide:
2664 /* get a new "load_struct_t" to describe the mappings for that file */
2665 entry = (load_struct_t *)zalloc(lsf_zone);
2666 LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
2667 LSF_DEBUG(("lsf_map"
2668 "(cnt=%d,file=%p,sm_info=%p) "
2669 "entry=%p\n",
2670 map_cnt, file_object,
2671 sm_info, entry));
2672 if (entry == NULL) {
2673 SHARED_REGION_TRACE(
2674 SHARED_REGION_TRACE_ERROR,
2675 ("shared_region: %p: "
2676 "lsf_map: unable to allocate entry\n",
2677 current_thread()));
2678 return KERN_NO_SPACE;
2679 }
2680 shared_file_available_hash_ele--;
2681 entry->file_object = (int)file_object;
2682 entry->mapping_cnt = map_cnt;
2683 entry->mappings = NULL;
2684 entry->links.prev = (queue_entry_t) 0;
2685 entry->links.next = (queue_entry_t) 0;
2686 entry->regions_instance = (shared_region_mapping_t)sm_info->self;
2687 entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
2688 entry->file_offset = mappings[0].sfm_file_offset;
2689
2690 /* insert the new file entry in the hash table, for later lookups */
2691 lsf_hash_insert(entry, sm_info);
2692
2693 /* where we should add the next mapping description for that file */
2694 tptr = &(entry->mappings);
2695
2696 entry->base_address = base_offset;
2697 total_size = 0;
2698
2699 /* establish each requested mapping */
2700 for (i = 0; i < map_cnt; i++) {
2701 mach_vm_offset_t target_address;
2702 mach_vm_offset_t region_mask;
2703
2704 if (mappings[i].sfm_init_prot & VM_PROT_COW) {
2705 region_handle = (ipc_port_t)sm_info->data_region;
2706 region_mask = SHARED_DATA_REGION_MASK;
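/*
 * A read-write (copy-on-write) mapping must fall entirely within the
 * data segment: both its first and its last byte, once rebased, must
 * land in the 0x10000000 segment of the global shared window.
 */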
2707 if ((((mappings[i].sfm_address + base_offset)
2708 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
2709 (((mappings[i].sfm_address + base_offset +
2710 mappings[i].sfm_size - 1)
2711 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
2712 SHARED_REGION_TRACE(
2713 SHARED_REGION_TRACE_ERROR,
2714 ("shared_region: %p: lsf_map: "
2715 "RW mapping #%d not in segment",
2716 current_thread(), i));
2717 shared_region_dump_mappings(
2718 SHARED_REGION_TRACE_ERROR,
2719 mappings, map_cnt, base_offset);
2720
2721 lsf_deallocate(entry,
2722 file_object,
2723 entry->base_address,
2724 sm_info,
2725 TRUE);
2726 return KERN_INVALID_ARGUMENT;
2727 }
2728 } else {
2729 region_mask = SHARED_TEXT_REGION_MASK;
2730 region_handle = (ipc_port_t)sm_info->text_region;
2731 if (((mappings[i].sfm_address + base_offset)
2732 & GLOBAL_SHARED_SEGMENT_MASK) ||
2733 ((mappings[i].sfm_address + base_offset +
2734 mappings[i].sfm_size - 1)
2735 & GLOBAL_SHARED_SEGMENT_MASK)) {
2736 SHARED_REGION_TRACE(
2737 SHARED_REGION_TRACE_ERROR,
2738 ("shared_region: %p: lsf_map: "
2739 "RO mapping #%d not in segment",
2740 current_thread(), i));
2741 shared_region_dump_mappings(
2742 SHARED_REGION_TRACE_ERROR,
2743 mappings, map_cnt, base_offset);
2744
2745 lsf_deallocate(entry,
2746 file_object,
2747 entry->base_address,
2748 sm_info,
2749 TRUE);
2750 return KERN_INVALID_ARGUMENT;
2751 }
2752 }
2753 if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
2754 ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
2755 (file_size))) {
2756 SHARED_REGION_TRACE(
2757 SHARED_REGION_TRACE_ERROR,
2758 ("shared_region: %p: lsf_map: "
2759 "ZF mapping #%d beyond EOF",
2760 current_thread(), i));
2761 shared_region_dump_mappings(SHARED_REGION_TRACE_ERROR,
2762 mappings, map_cnt,
2763 base_offset);
2764
2765
2766 lsf_deallocate(entry,
2767 file_object,
2768 entry->base_address,
2769 sm_info,
2770 TRUE);
2771 return KERN_INVALID_ARGUMENT;
2772 }
2773 target_address = entry->base_address +
2774 ((mappings[i].sfm_address) & region_mask);
2775 if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
2776 map_port = MACH_PORT_NULL;
2777 } else {
2778 map_port = (ipc_port_t) file_object->pager;
2779 }
2780 region_entry = (vm_named_entry_t) region_handle->ip_kobject;
2781
2782 total_size += mappings[i].sfm_size;
2783 if (mappings[i].sfm_size == 0) {
2784 /* nothing to map... */
2785 kr = KERN_SUCCESS;
2786 } else {
2787 kr = mach_vm_map(
2788 region_entry->backing.map,
2789 &target_address,
2790 vm_map_round_page(mappings[i].sfm_size),
2791 0,
2792 VM_FLAGS_FIXED,
2793 map_port,
2794 mappings[i].sfm_file_offset,
2795 TRUE,
2796 (mappings[i].sfm_init_prot &
2797 (VM_PROT_READ|VM_PROT_EXECUTE)),
2798 (mappings[i].sfm_max_prot &
2799 (VM_PROT_READ|VM_PROT_EXECUTE)),
2800 VM_INHERIT_DEFAULT);
2801 }
2802 if (kr != KERN_SUCCESS) {
2803 vm_offset_t old_base_address;
2804
2805 old_base_address = entry->base_address;
2806 lsf_deallocate(entry,
2807 file_object,
2808 entry->base_address,
2809 sm_info,
2810 TRUE);
2811 entry = NULL;
2812
2813 if (slide_p != NULL) {
2814 /*
2815 * Requested mapping failed but the caller
2816 * is OK with sliding the library in the
2817 * shared region, so let's try and slide it...
2818 */
2819
2820 SHARED_REGION_TRACE(
2821 SHARED_REGION_TRACE_CONFLICT,
2822 ("shared_region: %p: lsf_map: "
2823 "mapping #%d failed to map, "
2824 "kr=0x%x, sliding...\n",
2825 current_thread(), i, kr));
2826 shared_region_dump_mappings(
2827 SHARED_REGION_TRACE_INFO,
2828 mappings, map_cnt, base_offset);
2829 shared_region_dump_conflict_info(
2830 SHARED_REGION_TRACE_CONFLICT,
2831 region_entry->backing.map,
2832 (old_base_address +
2833 ((mappings[i].sfm_address)
2834 & region_mask)),
2835 vm_map_round_page(mappings[i].sfm_size));
2836
2837 /* lookup an appropriate spot */
2838 kr = lsf_slide(map_cnt, mappings,
2839 sm_info, &base_offset);
2840 if (kr == KERN_SUCCESS) {
2841 /* try and map it there ... */
2842 goto restart_after_slide;
2843 }
2844 /* couldn't slide ... */
2845 }
2846
2847 SHARED_REGION_TRACE(
2848 SHARED_REGION_TRACE_CONFLICT,
2849 ("shared_region: %p: lsf_map: "
2850 "mapping #%d failed to map, "
2851 "kr=0x%x, no sliding\n",
2852 current_thread(), i, kr));
2853 shared_region_dump_mappings(
2854 SHARED_REGION_TRACE_INFO,
2855 mappings, map_cnt, base_offset);
2856 shared_region_dump_conflict_info(
2857 SHARED_REGION_TRACE_CONFLICT,
2858 region_entry->backing.map,
2859 (old_base_address +
2860 ((mappings[i].sfm_address)
2861 & region_mask)),
2862 vm_map_round_page(mappings[i].sfm_size));
2863 return KERN_FAILURE;
2864 }
2865
2866 /* record this mapping */
2867 file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
2868 if (file_mapping == NULL) {
2869 lsf_deallocate(entry,
2870 file_object,
2871 entry->base_address,
2872 sm_info,
2873 TRUE);
2874 SHARED_REGION_TRACE(
2875 SHARED_REGION_TRACE_ERROR,
2876 ("shared_region: %p: "
2877 "lsf_map: unable to allocate mapping\n",
2878 current_thread()));
2879 return KERN_NO_SPACE;
2880 }
2881 shared_file_available_hash_ele--;
2882 file_mapping->mapping_offset = (mappings[i].sfm_address)
2883 & region_mask;
2884 file_mapping->size = mappings[i].sfm_size;
2885 file_mapping->file_offset = mappings[i].sfm_file_offset;
2886 file_mapping->protection = mappings[i].sfm_init_prot;
2887 file_mapping->next = NULL;
2888 LSF_DEBUG(("lsf_map: file_mapping %p "
2889 "for offset=0x%x size=0x%x\n",
2890 file_mapping, file_mapping->mapping_offset,
2891 file_mapping->size));
2892
2893 /* and link it to the file entry */
2894 *tptr = file_mapping;
2895
2896 /* where to put the next mapping's description */
2897 tptr = &(file_mapping->next);
2898 }
2899
2900 if (slide_p != NULL) {
2901 *slide_p = base_offset - original_base_offset;
2902 }
2903
2904 if ((sm_info->flags & SHARED_REGION_STANDALONE) ||
2905 (total_size == 0)) {
2906 /*
2907 * Two cases:
2908 * 1. we have a standalone and private shared region, so we
2909 * don't really need to keep the information about each file
2910 * and each mapping. Just deallocate it all.
2911 * 2. the total size of the mappings is 0, so nothing at all
2912 * was mapped. Let's not waste kernel resources to describe
2913 * nothing.
2914 *
2915 * XXX we still have the hash table, though...
2916 */
2917 lsf_deallocate(entry, file_object, entry->base_address, sm_info,
2918 FALSE);
2919 }
2920
2921 LSF_DEBUG(("lsf_map: done\n"));
2922 return KERN_SUCCESS;
2923 }
2924
2925
2926 /* Finds the file_object extent list in the shared memory hash table. */
2927 /* If one is found, the associated extents in shared memory are deallocated */
2928 /* and the extent list is freed. */
2929
2930 static void
2931 lsf_unload(
2932 void *file_object,
2933 vm_offset_t base_offset,
2934 shared_region_task_mappings_t sm_info)
2935 {
2936 lsf_deallocate(NULL, file_object, base_offset, sm_info, TRUE);
2937 }
2938
2939 /*
2940 * lsf_deallocate:
2941 *
2942 * Deallocates all the "shared region" internal data structures describing
2943 * the file and its mappings.
2944 * Also deallocate the actual file mappings if requested ("unload" arg).
2945 */
2946 static void
2947 lsf_deallocate(
2948 load_struct_t *target_entry,
2949 void *file_object,
2950 vm_offset_t base_offset,
2951 shared_region_task_mappings_t sm_info,
2952 boolean_t unload)
2953 {
2954 load_struct_t *entry;
2955 loaded_mapping_t *map_ele;
2956 loaded_mapping_t *back_ptr;
2957 kern_return_t kr;
2958
2959 LSF_DEBUG(("lsf_deallocate(target=%p,file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
2960 target_entry, file_object, base_offset, sm_info, unload));
2961 entry = lsf_hash_delete(target_entry,
2962 file_object,
2963 base_offset,
2964 sm_info);
2965 if (entry) {
2966 map_ele = entry->mappings;
2967 while(map_ele != NULL) {
2968 if (unload) {
2969 ipc_port_t region_handle;
2970 vm_named_entry_t region_entry;
2971
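/* pick the submap this mapping lives in: COW mappings sit in the data region, all others in the text region */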
2972 if(map_ele->protection & VM_PROT_COW) {
2973 region_handle = (ipc_port_t)
2974 sm_info->data_region;
2975 } else {
2976 region_handle = (ipc_port_t)
2977 sm_info->text_region;
2978 }
2979 region_entry = (vm_named_entry_t)
2980 region_handle->ip_kobject;
2981
2982 kr = vm_deallocate(region_entry->backing.map,
2983 (entry->base_address +
2984 map_ele->mapping_offset),
2985 map_ele->size);
2986 assert(kr == KERN_SUCCESS);
2987 }
2988 back_ptr = map_ele;
2989 map_ele = map_ele->next;
2990 LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
2991 "offset 0x%x size 0x%x\n",
2992 back_ptr, back_ptr->mapping_offset,
2993 back_ptr->size));
2994 zfree(lsf_zone, back_ptr);
2995 shared_file_available_hash_ele++;
2996 }
2997 LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
2998 LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
2999 zfree(lsf_zone, entry);
3000 shared_file_available_hash_ele++;
3001 }
3002 LSF_DEBUG(("lsf_deallocate: done\n"));
3003 }
3004
3005 /* returns an integer from 1 to 100 representing how full the mapping pool is, in percent */
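/* e.g. 1000 in-use elements of 64 bytes against a 1 MB zone max would report 6 */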
3006 unsigned int
3007 lsf_mapping_pool_gauge(void)
3008 {
3009 return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
3010 }