osfmk/vm/vm_shared_region.c (xnu-2422.100.13)
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24/*
25 * Shared region (... and comm page)
26 *
27 * This file handles the VM shared region and comm page.
28 *
29 */
30/*
31 * SHARED REGIONS
32 * --------------
33 *
34 * A shared region is a submap that contains the most common system shared
35 * libraries for a given environment.
36 * An environment is defined by (cpu-type, 64-bitness, root directory).
37 *
38 * The point of a shared region is to reduce the setup overhead when exec'ing
39 * a new process.
40 * A shared region uses a shared VM submap that gets mapped automatically
41 * at exec() time (see vm_map_exec()). The first process of a given
42 * environment sets up the shared region and all further processes in that
43 * environment can re-use that shared region without having to re-create
44 * the same mappings in their VM map. All they need is contained in the shared
45 * region.
46 * It can also share a pmap (mostly for read-only parts but also for the
47 * initial version of some writable parts), which gets "nested" into the
48 * process's pmap. This reduces the number of soft faults: once one process
49 * brings in a page in the shared region, all the other processes can access
50 * it without having to enter it in their own pmap.
51 *
52 *
53 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
54 * to map the appropriate shared region in the process's address space.
55 * We look up the appropriate shared region for the process's environment.
56 * If we can't find one, we create a new (empty) one and add it to the list.
57 * Otherwise, we just take an extra reference on the shared region we found.
58 *
59 * The "dyld" runtime (mapped into the process's address space at exec() time)
60 * will then use the shared_region_check_np() and shared_region_map_np()
61 * system calls to validate and/or populate the shared region with the
62 * appropriate dyld_shared_cache file.
63 *
64 * The shared region is inherited on fork() and the child simply takes an
65 * extra reference on its parent's shared region.
66 *
67 * When the task terminates, we release a reference on its shared region.
68 * When the last reference is released, we destroy the shared region.
69 *
70 * After a chroot(), the calling process keeps using its original shared region,
71 * since that's what was mapped when it was started. But its children
72 * will use a different shared region, because they need to use the shared
73 * cache that's relative to the new root directory.
74 */
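/*
 * Illustrative sketch (not part of this file): the exec-time flow described
 * above reduces to a call like the following from vm_map_exec(), where
 * "new_map", "task", "fsroot" and "cpu" describe the process being exec'ed:
 *
 *	kern_return_t kr;
 *
 *	kr = vm_shared_region_enter(new_map, task, fsroot, cpu);
 *	if (kr != KERN_SUCCESS) {
 *		// how the caller reacts is outside this sketch
 *	}
 *
 * vm_shared_region_enter() looks up (or creates) the shared region for that
 * environment, makes the task hold a reference via vm_shared_region_set(),
 * and maps the region's submap at its base address with pmap nesting.
 */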
75/*
76 * COMM PAGE
77 *
78 * A "comm page" is an area of memory that is populated by the kernel with
79 * the appropriate platform-specific version of some commonly used code.
80 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
81 * for the native cpu-type. No need to overly optimize translated code
82 * for hardware that is not really there !
83 *
84 * The comm pages are created and populated at boot time.
85 *
86 * The appropriate comm page is mapped into a process's address space
87 * at exec() time, in vm_map_exec().
88 * It is then inherited on fork().
89 *
90 * The comm page is shared between the kernel and all applications of
91 * a given platform. Only the kernel can modify it.
92 *
93 * Applications just branch to fixed addresses in the comm page and find
94 * the right version of the code for the platform. There is also some
95 * data provided and updated by the kernel for processes to retrieve easily
96 * without having to do a system call.
97 */
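/*
 * Illustrative sketch (user-space view, not part of this file): reading a
 * kernel-maintained value straight from the comm page via one of the fixed
 * addresses published in <machine/cpu_capabilities.h>.  The choice of the
 * _COMM_PAGE_CPU_CAPABILITIES slot is just an example; no system call is
 * involved.
 *
 *	#include <stdint.h>
 *	#include <machine/cpu_capabilities.h>
 *
 *	static uint32_t
 *	get_cpu_capabilities(void)
 *	{
 *		return *(volatile uint32_t *)_COMM_PAGE_CPU_CAPABILITIES;
 *	}
 */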
98
99#include <debug.h>
100
101#include <kern/ipc_tt.h>
102#include <kern/kalloc.h>
103#include <kern/thread_call.h>
104
105#include <mach/mach_vm.h>
106
107#include <vm/vm_map.h>
108#include <vm/vm_shared_region.h>
109
110#include <vm/vm_protos.h>
111
112#include <machine/commpage.h>
113#include <machine/cpu_capabilities.h>
114
115/* "dyld" uses this to figure out what the kernel supports */
116int shared_region_version = 3;
117
118/* trace level, output is sent to the system log file */
119int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;
120
121/* should local (non-chroot) shared regions persist when no task uses them ? */
122int shared_region_persistence = 0; /* no by default */
123
124/* delay before reclaiming an unused shared region */
125int shared_region_destroy_delay = 120; /* in seconds */
126
127/*
128 * Only one cache gets to slide on Desktop, since we can't
129 * tear down slide info properly today and the desktop actually
130 * produces lots of shared caches.
131 */
132boolean_t shared_region_completed_slide = FALSE;
133
134/* this lock protects all the shared region data structures */
135lck_grp_t *vm_shared_region_lck_grp;
136lck_mtx_t vm_shared_region_lock;
137
138#define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock)
139#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
140#define vm_shared_region_sleep(event, interruptible) \
141 lck_mtx_sleep(&vm_shared_region_lock, \
142 LCK_SLEEP_DEFAULT, \
143 (event_t) (event), \
144 (interruptible))
145
146/* the list of currently available shared regions (one per environment) */
147queue_head_t vm_shared_region_queue;
148
149static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
150static vm_shared_region_t vm_shared_region_create(
151 void *root_dir,
152 cpu_type_t cputype,
153 boolean_t is_64bit);
154static void vm_shared_region_destroy(vm_shared_region_t shared_region);
155
156static void vm_shared_region_timeout(thread_call_param_t param0,
157 thread_call_param_t param1);
158
159static int __commpage_setup = 0;
160#if defined(__i386__) || defined(__x86_64__)
161static int __system_power_source = 1; /* init to external power source */
162static void post_sys_powersource_internal(int i, int internal);
163#endif /* __i386__ || __x86_64__ */
164
165
166/*
167 * Initialize the module...
168 */
169void
170vm_shared_region_init(void)
171{
172 SHARED_REGION_TRACE_DEBUG(
173 ("shared_region: -> init\n"));
174
175 vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
176 LCK_GRP_ATTR_NULL);
177 lck_mtx_init(&vm_shared_region_lock,
178 vm_shared_region_lck_grp,
179 LCK_ATTR_NULL);
180
181 queue_init(&vm_shared_region_queue);
182
183 SHARED_REGION_TRACE_DEBUG(
184 ("shared_region: <- init\n"));
185}
186
187/*
188 * Retrieve a task's shared region and grab an extra reference to
189 * make sure it doesn't disappear while the caller is using it.
190 * The caller is responsible for consuming that extra reference if
191 * necessary.
192 */
193vm_shared_region_t
194vm_shared_region_get(
195 task_t task)
196{
197 vm_shared_region_t shared_region;
198
199 SHARED_REGION_TRACE_DEBUG(
200 ("shared_region: -> get(%p)\n",
201 task));
202
203 task_lock(task);
204 vm_shared_region_lock();
205 shared_region = task->shared_region;
206 if (shared_region) {
207 assert(shared_region->sr_ref_count > 0);
208 vm_shared_region_reference_locked(shared_region);
209 }
210 vm_shared_region_unlock();
211 task_unlock(task);
212
213 SHARED_REGION_TRACE_DEBUG(
214 ("shared_region: get(%p) <- %p\n",
215 task, shared_region));
216
217 return shared_region;
218}
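/*
 * Illustrative usage sketch (not from this file): the reference discipline
 * described above, assuming "task" is a valid task known to the caller.
 *
 *	vm_shared_region_t sr;
 *
 *	sr = vm_shared_region_get(task);
 *	if (sr != NULL) {
 *		mach_vm_offset_t base = vm_shared_region_base_address(sr);
 *		// ... use "base" while the extra reference keeps "sr" alive ...
 *		vm_shared_region_deallocate(sr);  // consume the extra reference
 *	}
 */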
219
220/*
221 * Get the base address of the shared region.
222 * That's the address at which it needs to be mapped in the process's address
223 * space.
224 * No need to lock since this data is set when the shared region is
225 * created and is never modified after that. The caller must hold an extra
226 * reference on the shared region to prevent it from being destroyed.
227 */
228mach_vm_offset_t
229vm_shared_region_base_address(
230 vm_shared_region_t shared_region)
231{
232 SHARED_REGION_TRACE_DEBUG(
233 ("shared_region: -> base_address(%p)\n",
234 shared_region));
235 assert(shared_region->sr_ref_count > 1);
236 SHARED_REGION_TRACE_DEBUG(
237 ("shared_region: base_address(%p) <- 0x%llx\n",
238 shared_region, (long long)shared_region->sr_base_address));
239 return shared_region->sr_base_address;
240}
241
242/*
243 * Get the size of the shared region.
244 * That's the size that needs to be mapped in the process's address
245 * space.
246 * No need to lock since this data is set when the shared region is
247 * created and is never modified after that. The caller must hold an extra
248 * reference on the shared region to prevent it from being destroyed.
249 */
250mach_vm_size_t
251vm_shared_region_size(
252 vm_shared_region_t shared_region)
253{
254 SHARED_REGION_TRACE_DEBUG(
255 ("shared_region: -> size(%p)\n",
256 shared_region));
257 assert(shared_region->sr_ref_count > 1);
258 SHARED_REGION_TRACE_DEBUG(
259 ("shared_region: size(%p) <- 0x%llx\n",
260 shared_region, (long long)shared_region->sr_size));
261 return shared_region->sr_size;
262}
263
264/*
265 * Get the memory entry of the shared region.
266 * That's the "memory object" that needs to be mapped in the process's address
267 * space.
268 * No need to lock since this data is set when the shared region is
269 * created and is never modified after that. The caller must hold an extra
270 * reference on the shared region to prevent it from being destroyed.
271 */
272ipc_port_t
273vm_shared_region_mem_entry(
274 vm_shared_region_t shared_region)
275{
276 SHARED_REGION_TRACE_DEBUG(
277 ("shared_region: -> mem_entry(%p)\n",
278 shared_region));
279 assert(shared_region->sr_ref_count > 1);
280 SHARED_REGION_TRACE_DEBUG(
281 ("shared_region: mem_entry(%p) <- %p\n",
282 shared_region, shared_region->sr_mem_entry));
283 return shared_region->sr_mem_entry;
284}
285
286uint32_t
287vm_shared_region_get_slide(
288 vm_shared_region_t shared_region)
289{
290 SHARED_REGION_TRACE_DEBUG(
291 ("shared_region: -> vm_shared_region_get_slide(%p)\n",
292 shared_region));
293 assert(shared_region->sr_ref_count > 1);
294 SHARED_REGION_TRACE_DEBUG(
295 ("shared_region: vm_shared_region_get_slide(%p) <- %u\n",
296 shared_region, shared_region->sr_slide_info.slide));
297
298 /* 0 if we haven't slid */
299 assert(shared_region->sr_slide_info.slide_object != NULL ||
300 shared_region->sr_slide_info.slide == 0);
301
302 return shared_region->sr_slide_info.slide;
303}
304
305vm_shared_region_slide_info_t
306vm_shared_region_get_slide_info(
307 vm_shared_region_t shared_region)
308{
309 SHARED_REGION_TRACE_DEBUG(
310 ("shared_region: -> vm_shared_region_get_slide_info(%p)\n",
311 shared_region));
312 assert(shared_region->sr_ref_count > 1);
313 SHARED_REGION_TRACE_DEBUG(
314 ("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n",
315 shared_region, &shared_region->sr_slide_info));
316 return &shared_region->sr_slide_info;
317}
318
319/*
320 * Set the shared region the process should use.
321 * A NULL new shared region means that we just want to release the old
322 * shared region.
323 * The caller should already have an extra reference on the new shared region
324 * (if any). We release a reference on the old shared region (if any).
325 */
326void
327vm_shared_region_set(
328 task_t task,
329 vm_shared_region_t new_shared_region)
330{
331 vm_shared_region_t old_shared_region;
332
333 SHARED_REGION_TRACE_DEBUG(
334 ("shared_region: -> set(%p, %p)\n",
335 task, new_shared_region));
336
337 task_lock(task);
338 vm_shared_region_lock();
339
340 old_shared_region = task->shared_region;
341 if (new_shared_region) {
342 assert(new_shared_region->sr_ref_count > 0);
343 }
344
345 task->shared_region = new_shared_region;
346
347 vm_shared_region_unlock();
348 task_unlock(task);
349
350 if (old_shared_region) {
351 assert(old_shared_region->sr_ref_count > 0);
352 vm_shared_region_deallocate(old_shared_region);
353 }
354
355 SHARED_REGION_TRACE_DEBUG(
356 ("shared_region: set(%p) <- old=%p new=%p\n",
357 task, old_shared_region, new_shared_region));
358}
359
360/*
361 * Look up the shared region for the desired environment.
362 * If none is found, create a new (empty) one.
363 * Grab an extra reference on the returned shared region, to make sure
364 * it doesn't get destroyed before the caller is done with it. The caller
365 * is responsible for consuming that extra reference if necessary.
366 */
367vm_shared_region_t
368vm_shared_region_lookup(
369 void *root_dir,
370 cpu_type_t cputype,
371 boolean_t is_64bit)
372{
373 vm_shared_region_t shared_region;
374 vm_shared_region_t new_shared_region;
375
376 SHARED_REGION_TRACE_DEBUG(
377 ("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n",
378 root_dir, cputype, is_64bit));
379
380 shared_region = NULL;
381 new_shared_region = NULL;
382
383 vm_shared_region_lock();
384 for (;;) {
385 queue_iterate(&vm_shared_region_queue,
386 shared_region,
387 vm_shared_region_t,
388 sr_q) {
389 assert(shared_region->sr_ref_count > 0);
390 if (shared_region->sr_cpu_type == cputype &&
391 shared_region->sr_root_dir == root_dir &&
392 shared_region->sr_64bit == is_64bit) {
393 /* found a match ! */
394 vm_shared_region_reference_locked(shared_region);
395 goto done;
396 }
397 }
398 if (new_shared_region == NULL) {
399 /* no match: create a new one */
400 vm_shared_region_unlock();
401 new_shared_region = vm_shared_region_create(root_dir,
402 cputype,
403 is_64bit);
404 /* do the lookup again, in case we lost a race */
405 vm_shared_region_lock();
406 continue;
407 }
408 /* still no match: use our new one */
409 shared_region = new_shared_region;
410 new_shared_region = NULL;
411 queue_enter(&vm_shared_region_queue,
412 shared_region,
413 vm_shared_region_t,
414 sr_q);
415 break;
416 }
417
418done:
419 vm_shared_region_unlock();
420
421 if (new_shared_region) {
422 /*
423 * We lost a race with someone else to create a new shared
424 * region for that environment. Get rid of our unused one.
425 */
426 assert(new_shared_region->sr_ref_count == 1);
427 new_shared_region->sr_ref_count--;
428 vm_shared_region_destroy(new_shared_region);
429 new_shared_region = NULL;
430 }
431
432 SHARED_REGION_TRACE_DEBUG(
433 ("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n",
434 root_dir, cputype, is_64bit, shared_region));
435
436 assert(shared_region->sr_ref_count > 0);
437 return shared_region;
438}
439
440/*
441 * Take an extra reference on a shared region.
442 * The vm_shared_region_lock should already be held by the caller.
443 */
444static void
445vm_shared_region_reference_locked(
446 vm_shared_region_t shared_region)
447{
448#if DEBUG
449 lck_mtx_assert(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);
450#endif
451
452 SHARED_REGION_TRACE_DEBUG(
453 ("shared_region: -> reference_locked(%p)\n",
454 shared_region));
455 assert(shared_region->sr_ref_count > 0);
456 shared_region->sr_ref_count++;
457
458 if (shared_region->sr_timer_call != NULL) {
459 boolean_t cancelled;
460
461 /* cancel and free any pending timeout */
462 cancelled = thread_call_cancel(shared_region->sr_timer_call);
463 if (cancelled) {
464 thread_call_free(shared_region->sr_timer_call);
465 shared_region->sr_timer_call = NULL;
466 /* release the reference held by the cancelled timer */
467 shared_region->sr_ref_count--;
468 } else {
469 /* the timer will drop the reference and free itself */
470 }
471 }
472
473 SHARED_REGION_TRACE_DEBUG(
474 ("shared_region: reference_locked(%p) <- %d\n",
475 shared_region, shared_region->sr_ref_count));
476}
477
478/*
479 * Release a reference on the shared region.
480 * Destroy it if there are no references left.
481 */
482void
483vm_shared_region_deallocate(
484 vm_shared_region_t shared_region)
485{
486 SHARED_REGION_TRACE_DEBUG(
487 ("shared_region: -> deallocate(%p)\n",
488 shared_region));
489
490 vm_shared_region_lock();
491
492 assert(shared_region->sr_ref_count > 0);
493
494 if (shared_region->sr_root_dir == NULL) {
495 /*
496 * Local (i.e. based on the boot volume) shared regions
497 * can persist or not based on the "shared_region_persistence"
498 * sysctl.
499 * Make sure that this one complies.
500 *
501 * See comments in vm_shared_region_slide() for notes about
502 * shared regions we have slid (which are not torn down currently).
503 */
504 if (shared_region_persistence &&
505 !shared_region->sr_persists) {
506 /* make this one persistent */
507 shared_region->sr_ref_count++;
508 shared_region->sr_persists = TRUE;
509 } else if (!shared_region_persistence &&
510 shared_region->sr_persists) {
511 /* make this one no longer persistent */
512 assert(shared_region->sr_ref_count > 1);
513 shared_region->sr_ref_count--;
514 shared_region->sr_persists = FALSE;
515 }
516 }
517
518 assert(shared_region->sr_ref_count > 0);
519 shared_region->sr_ref_count--;
520 SHARED_REGION_TRACE_DEBUG(
521 ("shared_region: deallocate(%p): ref now %d\n",
522 shared_region, shared_region->sr_ref_count));
523
524 if (shared_region->sr_ref_count == 0) {
525 uint64_t deadline;
526
527 assert(!shared_region->sr_slid);
528
529 if (shared_region->sr_timer_call == NULL) {
530 /* hold one reference for the timer */
531 assert(! shared_region->sr_mapping_in_progress);
532 shared_region->sr_ref_count++;
533
534 /* set up the timer */
535 shared_region->sr_timer_call = thread_call_allocate(
536 (thread_call_func_t) vm_shared_region_timeout,
537 (thread_call_param_t) shared_region);
538
539 /* schedule the timer */
540 clock_interval_to_deadline(shared_region_destroy_delay,
541 1000 * 1000 * 1000,
542 &deadline);
543 thread_call_enter_delayed(shared_region->sr_timer_call,
544 deadline);
545
546 SHARED_REGION_TRACE_DEBUG(
547 ("shared_region: deallocate(%p): armed timer\n",
548 shared_region));
549
550 vm_shared_region_unlock();
551 } else {
552 /* timer expired: let go of this shared region */
553
554 /*
555 * We can't properly handle teardown of a slid object today.
556 */
557 assert(!shared_region->sr_slid);
558
559 /*
560 * Remove it from the queue first, so no one can find
561 * it...
562 */
563 queue_remove(&vm_shared_region_queue,
564 shared_region,
565 vm_shared_region_t,
566 sr_q);
567 vm_shared_region_unlock();
568
569 /* ... and destroy it */
570 vm_shared_region_destroy(shared_region);
571 shared_region = NULL;
572 }
573 } else {
574 vm_shared_region_unlock();
575 }
576
577 SHARED_REGION_TRACE_DEBUG(
578 ("shared_region: deallocate(%p) <-\n",
579 shared_region));
580}
581
582void
583vm_shared_region_timeout(
584 thread_call_param_t param0,
585 __unused thread_call_param_t param1)
586{
587 vm_shared_region_t shared_region;
588
589 shared_region = (vm_shared_region_t) param0;
590
591 vm_shared_region_deallocate(shared_region);
592}
593
594/*
595 * Create a new (empty) shared region for a new environment.
596 */
597static vm_shared_region_t
598vm_shared_region_create(
599 void *root_dir,
600 cpu_type_t cputype,
601 boolean_t is_64bit)
602{
603 kern_return_t kr;
604 vm_named_entry_t mem_entry;
605 ipc_port_t mem_entry_port;
606 vm_shared_region_t shared_region;
607 vm_shared_region_slide_info_t si;
608 vm_map_t sub_map;
609 mach_vm_offset_t base_address, pmap_nesting_start;
610 mach_vm_size_t size, pmap_nesting_size;
611
612 SHARED_REGION_TRACE_DEBUG(
613 ("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n",
614 root_dir, cputype, is_64bit));
615
616 base_address = 0;
617 size = 0;
618 mem_entry = NULL;
619 mem_entry_port = IPC_PORT_NULL;
620 sub_map = VM_MAP_NULL;
621
622 /* create a new shared region structure... */
623 shared_region = kalloc(sizeof (*shared_region));
624 if (shared_region == NULL) {
625 SHARED_REGION_TRACE_ERROR(
626 ("shared_region: create: couldn't allocate\n"));
627 goto done;
628 }
629
630 /* figure out the correct settings for the desired environment */
631 if (is_64bit) {
632 switch (cputype) {
633 case CPU_TYPE_I386:
634 base_address = SHARED_REGION_BASE_X86_64;
635 size = SHARED_REGION_SIZE_X86_64;
636 pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
637 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
638 break;
639 case CPU_TYPE_POWERPC:
640 base_address = SHARED_REGION_BASE_PPC64;
641 size = SHARED_REGION_SIZE_PPC64;
642 pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
643 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
644 break;
645 default:
646 SHARED_REGION_TRACE_ERROR(
647 ("shared_region: create: unknown cpu type %d\n",
648 cputype));
649 kfree(shared_region, sizeof (*shared_region));
650 shared_region = NULL;
651 goto done;
652 }
653 } else {
654 switch (cputype) {
655 case CPU_TYPE_I386:
656 base_address = SHARED_REGION_BASE_I386;
657 size = SHARED_REGION_SIZE_I386;
658 pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
659 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
660 break;
661 case CPU_TYPE_POWERPC:
662 base_address = SHARED_REGION_BASE_PPC;
663 size = SHARED_REGION_SIZE_PPC;
664 pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
665 pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
666 break;
667 default:
668 SHARED_REGION_TRACE_ERROR(
669 ("shared_region: create: unknown cpu type %d\n",
670 cputype));
671 kfree(shared_region, sizeof (*shared_region));
672 shared_region = NULL;
673 goto done;
674 }
675 }
676
677 /* create a memory entry structure and a Mach port handle */
678 kr = mach_memory_entry_allocate(&mem_entry,
679 &mem_entry_port);
680 if (kr != KERN_SUCCESS) {
681 kfree(shared_region, sizeof (*shared_region));
682 shared_region = NULL;
683 SHARED_REGION_TRACE_ERROR(
684 ("shared_region: create: "
685 "couldn't allocate mem_entry\n"));
686 goto done;
687 }
688
689 /* create a VM sub map and its pmap */
690 sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit),
691 0, size,
692 TRUE);
693 if (sub_map == VM_MAP_NULL) {
694 ipc_port_release_send(mem_entry_port);
695 kfree(shared_region, sizeof (*shared_region));
696 shared_region = NULL;
697 SHARED_REGION_TRACE_ERROR(
698 ("shared_region: create: "
699 "couldn't allocate map\n"));
700 goto done;
701 }
702
703 /* make the memory entry point to the VM sub map */
704 mem_entry->is_sub_map = TRUE;
705 mem_entry->backing.map = sub_map;
706 mem_entry->size = size;
707 mem_entry->protection = VM_PROT_ALL;
708
709 /* make the shared region point at the memory entry */
710 shared_region->sr_mem_entry = mem_entry_port;
711
712 /* fill in the shared region's environment and settings */
713 shared_region->sr_base_address = base_address;
714 shared_region->sr_size = size;
715 shared_region->sr_pmap_nesting_start = pmap_nesting_start;
716 shared_region->sr_pmap_nesting_size = pmap_nesting_size;
717 shared_region->sr_cpu_type = cputype;
718 shared_region->sr_64bit = is_64bit;
719 shared_region->sr_root_dir = root_dir;
720
721 queue_init(&shared_region->sr_q);
722 shared_region->sr_mapping_in_progress = FALSE;
723 shared_region->sr_slide_in_progress = FALSE;
724 shared_region->sr_persists = FALSE;
725 shared_region->sr_slid = FALSE;
726 shared_region->sr_timer_call = NULL;
727 shared_region->sr_first_mapping = (mach_vm_offset_t) -1;
728
729 /* grab a reference for the caller */
730 shared_region->sr_ref_count = 1;
731
732 /* And set up slide info */
733 si = &shared_region->sr_slide_info;
734 si->start = 0;
735 si->end = 0;
736 si->slide = 0;
737 si->slide_object = NULL;
738 si->slide_info_size = 0;
739 si->slide_info_entry = NULL;
740
741done:
742 if (shared_region) {
743 SHARED_REGION_TRACE_INFO(
744 ("shared_region: create(root=%p,cpu=%d,64bit=%d,"
745 "base=0x%llx,size=0x%llx) <- "
746 "%p mem=(%p,%p) map=%p pmap=%p\n",
747 root_dir, cputype, is_64bit, (long long)base_address,
748 (long long)size, shared_region,
749 mem_entry_port, mem_entry, sub_map, sub_map->pmap));
750 } else {
751 SHARED_REGION_TRACE_INFO(
752 ("shared_region: create(root=%p,cpu=%d,64bit=%d,"
753 "base=0x%llx,size=0x%llx) <- NULL",
754 root_dir, cputype, is_64bit, (long long)base_address,
755 (long long)size));
756 }
757 return shared_region;
758}
759
760/*
761 * Destroy a now-unused shared region.
762 * The shared region is no longer in the queue and can not be looked up.
763 */
764static void
765vm_shared_region_destroy(
766 vm_shared_region_t shared_region)
767{
768 vm_named_entry_t mem_entry;
769 vm_map_t map;
770
771 SHARED_REGION_TRACE_INFO(
772 ("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n",
773 shared_region,
774 shared_region->sr_root_dir,
775 shared_region->sr_cpu_type,
776 shared_region->sr_64bit));
777
778 assert(shared_region->sr_ref_count == 0);
779 assert(!shared_region->sr_persists);
780 assert(!shared_region->sr_slid);
781
782 mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
783 assert(mem_entry->is_sub_map);
784 assert(!mem_entry->internal);
785 assert(!mem_entry->is_pager);
786 assert(!mem_entry->is_copy);
787 map = mem_entry->backing.map;
788
789 /*
790 * Clean up the pmap first. The virtual addresses that were
791 * entered in this possibly "nested" pmap may have different values
792 * than the VM map's min and max offsets, if the VM sub map was
793 * mapped at a non-zero offset in the processes' main VM maps, which
794 * is usually the case, so the clean-up we do in vm_map_destroy() would
795 * not be enough.
796 */
797 if (map->pmap) {
798 pmap_remove(map->pmap,
799 shared_region->sr_base_address,
800 (shared_region->sr_base_address +
801 shared_region->sr_size));
802 }
803
804 /*
805 * Release our (one and only) handle on the memory entry.
806 * This will generate a no-senders notification, which will be processed
807 * by ipc_kobject_notify(), which will release the one and only
808 * reference on the memory entry and cause it to be destroyed, along
809 * with the VM sub map and its pmap.
810 */
811 mach_memory_entry_port_release(shared_region->sr_mem_entry);
812 mem_entry = NULL;
813 shared_region->sr_mem_entry = IPC_PORT_NULL;
814
815 if (shared_region->sr_timer_call) {
816 thread_call_free(shared_region->sr_timer_call);
817 }
818
819#if 0
820 /*
821 * If slid, free those resources. We'll want this eventually,
822 * but can't handle it properly today.
823 */
824 si = &shared_region->sr_slide_info;
825 if (si->slide_info_entry) {
826 kmem_free(kernel_map,
827 (vm_offset_t) si->slide_info_entry,
828 (vm_size_t) si->slide_info_size);
829 vm_object_deallocate(si->slide_object);
830 }
831#endif
832
833 /* release the shared region structure... */
834 kfree(shared_region, sizeof (*shared_region));
835
836 SHARED_REGION_TRACE_DEBUG(
837 ("shared_region: destroy(%p) <-\n",
838 shared_region));
839 shared_region = NULL;
840
841}
842
843/*
844 * Gets the address of the first (in time) mapping in the shared region.
845 */
846kern_return_t
847vm_shared_region_start_address(
848 vm_shared_region_t shared_region,
849 mach_vm_offset_t *start_address)
850{
851 kern_return_t kr;
852 mach_vm_offset_t sr_base_address;
853 mach_vm_offset_t sr_first_mapping;
854
855 SHARED_REGION_TRACE_DEBUG(
856 ("shared_region: -> start_address(%p)\n",
857 shared_region));
858 assert(shared_region->sr_ref_count > 1);
859
860 vm_shared_region_lock();
861
862 /*
863 * Wait if there's another thread establishing a mapping
864 * in this shared region right when we're looking at it.
865 * We want a consistent view of the map...
866 */
867 while (shared_region->sr_mapping_in_progress) {
868 /* wait for our turn... */
869 assert(shared_region->sr_ref_count > 1);
870 vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
871 THREAD_UNINT);
872 }
873 assert(! shared_region->sr_mapping_in_progress);
874 assert(shared_region->sr_ref_count > 1);
875
876 sr_base_address = shared_region->sr_base_address;
877 sr_first_mapping = shared_region->sr_first_mapping;
878
879 if (sr_first_mapping == (mach_vm_offset_t) -1) {
880 /* shared region is empty */
881 kr = KERN_INVALID_ADDRESS;
882 } else {
883 kr = KERN_SUCCESS;
884 *start_address = sr_base_address + sr_first_mapping;
885 }
886
887 vm_shared_region_unlock();
888
889 SHARED_REGION_TRACE_DEBUG(
890 ("shared_region: start_address(%p) <- 0x%llx\n",
891 shared_region, (long long)shared_region->sr_base_address));
892
893 return kr;
894}
895
896void
897vm_shared_region_undo_mappings(
898 vm_map_t sr_map,
899 mach_vm_offset_t sr_base_address,
900 struct shared_file_mapping_np *mappings,
901 unsigned int mappings_count)
902{
903 unsigned int j = 0;
904 vm_shared_region_t shared_region = NULL;
905 boolean_t reset_shared_region_state = FALSE;
906
907 shared_region = vm_shared_region_get(current_task());
908 if (shared_region == NULL) {
909 printf("Failed to undo mappings because of NULL shared region.\n");
910 return;
911 }
912
913
914 if (sr_map == NULL) {
915 ipc_port_t sr_handle;
916 vm_named_entry_t sr_mem_entry;
917
918 vm_shared_region_lock();
919 assert(shared_region->sr_ref_count > 1);
920
921 while (shared_region->sr_mapping_in_progress) {
922 /* wait for our turn... */
923 vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
924 THREAD_UNINT);
925 }
926 assert(! shared_region->sr_mapping_in_progress);
927 assert(shared_region->sr_ref_count > 1);
928 /* let others know we're working in this shared region */
929 shared_region->sr_mapping_in_progress = TRUE;
930
931 vm_shared_region_unlock();
932
933 reset_shared_region_state = TRUE;
934
935 /* no need to lock because this data is never modified... */
936 sr_handle = shared_region->sr_mem_entry;
937 sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
938 sr_map = sr_mem_entry->backing.map;
939 sr_base_address = shared_region->sr_base_address;
940 }
941 /*
942 * Undo the mappings we've established so far.
943 */
944 for (j = 0; j < mappings_count; j++) {
945 kern_return_t kr2;
946
947 if (mappings[j].sfm_size == 0) {
948 /*
949 * We didn't establish this
950 * mapping, so nothing to undo.
951 */
952 continue;
953 }
954 SHARED_REGION_TRACE_INFO(
955 ("shared_region: mapping[%d]: "
956 "address:0x%016llx "
957 "size:0x%016llx "
958 "offset:0x%016llx "
959 "maxprot:0x%x prot:0x%x: "
960 "undoing...\n",
961 j,
962 (long long)mappings[j].sfm_address,
963 (long long)mappings[j].sfm_size,
964 (long long)mappings[j].sfm_file_offset,
965 mappings[j].sfm_max_prot,
966 mappings[j].sfm_init_prot));
967 kr2 = mach_vm_deallocate(
968 sr_map,
969 (mappings[j].sfm_address -
970 sr_base_address),
971 mappings[j].sfm_size);
972 assert(kr2 == KERN_SUCCESS);
973 }
974
975 if (reset_shared_region_state) {
976 vm_shared_region_lock();
977 assert(shared_region->sr_ref_count > 1);
978 assert(shared_region->sr_mapping_in_progress);
979 /* we're done working on that shared region */
980 shared_region->sr_mapping_in_progress = FALSE;
981 thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
982 vm_shared_region_unlock();
983 reset_shared_region_state = FALSE;
984 }
985
986 vm_shared_region_deallocate(shared_region);
987}
988
989/*
990 * Establish some mappings of a file in the shared region.
991 * This is used by "dyld" via the shared_region_map_np() system call
992 * to populate the shared region with the appropriate shared cache.
993 *
994 * One could also call it several times to incrementally load several
995 * libraries, as long as they do not overlap.
996 * It will return KERN_SUCCESS if the mappings were successfully established
997 * or if they were already established identically by another process.
998 */
999kern_return_t
1000vm_shared_region_map_file(
1001 vm_shared_region_t shared_region,
1002 unsigned int mappings_count,
1003 struct shared_file_mapping_np *mappings,
1004 memory_object_control_t file_control,
1005 memory_object_size_t file_size,
1006 void *root_dir,
1007 uint32_t slide,
1008 user_addr_t slide_start,
1009 user_addr_t slide_size)
1010{
1011 kern_return_t kr;
1012 vm_object_t file_object;
1013 ipc_port_t sr_handle;
1014 vm_named_entry_t sr_mem_entry;
1015 vm_map_t sr_map;
1016 mach_vm_offset_t sr_base_address;
1017 unsigned int i;
1018 mach_port_t map_port;
1019 vm_map_offset_t target_address;
1020 vm_object_t object;
1021 vm_object_size_t obj_size;
1022 struct shared_file_mapping_np *mapping_to_slide = NULL;
1023 mach_vm_offset_t first_mapping = (mach_vm_offset_t) -1;
1024
1025
1026 kr = KERN_SUCCESS;
1027
1028 vm_shared_region_lock();
1029 assert(shared_region->sr_ref_count > 1);
1030
1031 if (shared_region->sr_root_dir != root_dir) {
1032 /*
1033 * This shared region doesn't match the current root
1034 * directory of this process. Deny the mapping to
1035 * avoid tainting the shared region with something that
1036 * doesn't quite belong in it.
1037 */
1038 vm_shared_region_unlock();
1039 kr = KERN_PROTECTION_FAILURE;
1040 goto done;
1041 }
1042
1043 /*
1044 * Make sure we handle only one mapping at a time in a given
1045 * shared region, to avoid race conditions. This should not
1046 * happen frequently...
1047 */
1048 while (shared_region->sr_mapping_in_progress) {
1049 /* wait for our turn... */
1050 vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
1051 THREAD_UNINT);
1052 }
1053 assert(! shared_region->sr_mapping_in_progress);
1054 assert(shared_region->sr_ref_count > 1);
1055 /* let others know we're working in this shared region */
1056 shared_region->sr_mapping_in_progress = TRUE;
1057
1058 vm_shared_region_unlock();
1059
1060 /* no need to lock because this data is never modified... */
1061 sr_handle = shared_region->sr_mem_entry;
1062 sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
1063 sr_map = sr_mem_entry->backing.map;
1064 sr_base_address = shared_region->sr_base_address;
1065
1066 SHARED_REGION_TRACE_DEBUG(
1067 ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
1068 shared_region, mappings_count, mappings,
1069 file_control, file_size));
1070
1071 /* get the VM object associated with the file to be mapped */
1072 file_object = memory_object_control_to_vm_object(file_control);
1073
1074 /* establish the mappings */
1075 for (i = 0; i < mappings_count; i++) {
1076 SHARED_REGION_TRACE_INFO(
1077 ("shared_region: mapping[%d]: "
1078 "address:0x%016llx size:0x%016llx offset:0x%016llx "
1079 "maxprot:0x%x prot:0x%x\n",
1080 i,
1081 (long long)mappings[i].sfm_address,
1082 (long long)mappings[i].sfm_size,
1083 (long long)mappings[i].sfm_file_offset,
1084 mappings[i].sfm_max_prot,
1085 mappings[i].sfm_init_prot));
1086
1087 if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
1088 /* zero-filled memory */
1089 map_port = MACH_PORT_NULL;
1090 } else {
1091 /* file-backed memory */
1092 map_port = (ipc_port_t) file_object->pager;
1093 }
1094
1095 if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) {
1096 /*
1097 * This is the mapping that needs to be slid.
1098 */
1099 if (mapping_to_slide != NULL) {
1100 SHARED_REGION_TRACE_INFO(
1101 ("shared_region: mapping[%d]: "
1102 "address:0x%016llx size:0x%016llx "
1103 "offset:0x%016llx "
1104 "maxprot:0x%x prot:0x%x "
1105 "will not be slid as only one such mapping is allowed...\n",
1106 i,
1107 (long long)mappings[i].sfm_address,
1108 (long long)mappings[i].sfm_size,
1109 (long long)mappings[i].sfm_file_offset,
1110 mappings[i].sfm_max_prot,
1111 mappings[i].sfm_init_prot));
1112 } else {
1113 mapping_to_slide = &mappings[i];
1114 }
1115 }
1116
1117 /* mapping's address is relative to the shared region base */
1118 target_address =
1119 mappings[i].sfm_address - sr_base_address;
1120
1121 /* establish that mapping, OK if it's "already" there */
1122 if (map_port == MACH_PORT_NULL) {
1123 /*
1124 * We want to map some anonymous memory in a
1125 * shared region.
1126 * We have to create the VM object now, so that it
1127 * can be mapped "copy-on-write".
1128 */
1129 obj_size = vm_map_round_page(mappings[i].sfm_size,
1130 VM_MAP_PAGE_MASK(sr_map));
1131 object = vm_object_allocate(obj_size);
1132 if (object == VM_OBJECT_NULL) {
1133 kr = KERN_RESOURCE_SHORTAGE;
1134 } else {
1135 kr = vm_map_enter(
1136 sr_map,
1137 &target_address,
1138 vm_map_round_page(mappings[i].sfm_size,
1139 VM_MAP_PAGE_MASK(sr_map)),
1140 0,
1141 VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
1142 object,
1143 0,
1144 TRUE,
1145 mappings[i].sfm_init_prot & VM_PROT_ALL,
1146 mappings[i].sfm_max_prot & VM_PROT_ALL,
1147 VM_INHERIT_DEFAULT);
1148 }
1149 } else {
1150 object = VM_OBJECT_NULL; /* no anonymous memory here */
1151 kr = vm_map_enter_mem_object(
1152 sr_map,
1153 &target_address,
1154 vm_map_round_page(mappings[i].sfm_size,
1155 VM_MAP_PAGE_MASK(sr_map)),
1156 0,
1157 VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
1158 map_port,
1159 mappings[i].sfm_file_offset,
1160 TRUE,
1161 mappings[i].sfm_init_prot & VM_PROT_ALL,
1162 mappings[i].sfm_max_prot & VM_PROT_ALL,
1163 VM_INHERIT_DEFAULT);
1164 }
1165
1166 if (kr == KERN_SUCCESS) {
1167 /*
1168 * Record the first (chronologically) successful
1169 * mapping in this shared region.
1170 * We're protected by "sr_mapping_in_progress" here,
1171 * so no need to lock "shared_region".
1172 */
1173 if (first_mapping == (mach_vm_offset_t) -1) {
1174 first_mapping = target_address;
1175 }
1176 } else {
1177 if (map_port == MACH_PORT_NULL) {
1178 /*
1179 * Get rid of the VM object we just created
1180 * but failed to map.
1181 */
1182 vm_object_deallocate(object);
1183 object = VM_OBJECT_NULL;
1184 }
1185 if (kr == KERN_MEMORY_PRESENT) {
1186 /*
1187 * This exact mapping was already there:
1188 * that's fine.
1189 */
1190 SHARED_REGION_TRACE_INFO(
1191 ("shared_region: mapping[%d]: "
1192 "address:0x%016llx size:0x%016llx "
1193 "offset:0x%016llx "
1194 "maxprot:0x%x prot:0x%x "
1195 "already mapped...\n",
1196 i,
1197 (long long)mappings[i].sfm_address,
1198 (long long)mappings[i].sfm_size,
1199 (long long)mappings[i].sfm_file_offset,
1200 mappings[i].sfm_max_prot,
1201 mappings[i].sfm_init_prot));
1202 /*
1203 * We didn't establish this mapping ourselves;
1204 * let's reset its size, so that we do not
1205 * attempt to undo it if an error occurs later.
1206 */
1207 mappings[i].sfm_size = 0;
1208 kr = KERN_SUCCESS;
1209 } else {
1210 /* this mapping failed ! */
1211 SHARED_REGION_TRACE_ERROR(
1212 ("shared_region: mapping[%d]: "
1213 "address:0x%016llx size:0x%016llx "
1214 "offset:0x%016llx "
1215 "maxprot:0x%x prot:0x%x failed 0x%x\n",
1216 i,
1217 (long long)mappings[i].sfm_address,
1218 (long long)mappings[i].sfm_size,
1219 (long long)mappings[i].sfm_file_offset,
1220 mappings[i].sfm_max_prot,
1221 mappings[i].sfm_init_prot,
1222 kr));
1223
1224 vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i);
1225 break;
1226 }
1227
1228 }
1229
1230 }
1231
1232 if (kr == KERN_SUCCESS &&
1233 slide &&
1234 mapping_to_slide != NULL) {
1235 kr = vm_shared_region_slide(slide,
1236 mapping_to_slide->sfm_file_offset,
1237 mapping_to_slide->sfm_size,
1238 slide_start,
1239 slide_size,
1240 file_control);
1241 if (kr != KERN_SUCCESS) {
1242 SHARED_REGION_TRACE_ERROR(
1243 ("shared_region: region_slide("
1244 "slide:0x%x start:0x%016llx "
1245 "size:0x%016llx) failed 0x%x\n",
1246 slide,
1247 (long long)slide_start,
1248 (long long)slide_size,
1249 kr));
1250 vm_shared_region_undo_mappings(NULL,
1251 0,
1252 mappings,
1253 mappings_count);
1254 }
1255 }
1256
1257 vm_shared_region_lock();
1258 assert(shared_region->sr_ref_count > 1);
1259 assert(shared_region->sr_mapping_in_progress);
1260 /* set "sr_first_mapping"; dyld uses it to validate the shared cache */
1261 if (kr == KERN_SUCCESS &&
1262 shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
1263 shared_region->sr_first_mapping = first_mapping;
1264 }
1265 /* we're done working on that shared region */
1266 shared_region->sr_mapping_in_progress = FALSE;
1267 thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
1268 vm_shared_region_unlock();
1269
1270done:
1271 SHARED_REGION_TRACE_DEBUG(
1272 ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n",
1273 shared_region, mappings_count, mappings,
1274 file_control, file_size, kr));
1275 return kr;
1276}
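/*
 * Illustrative sketch (dyld's side, not part of this file): how the mapping
 * path above is typically driven from user space.  The syscall wrapper name
 * and the "cache_fd"/"cache_base"/"cache_mapping_size" variables are
 * assumptions for illustration; the field names follow
 * struct shared_file_mapping_np as used above.
 *
 *	struct shared_file_mapping_np m;
 *
 *	m.sfm_address     = cache_base;          // address within the region
 *	m.sfm_size        = cache_mapping_size;  // bytes to map
 *	m.sfm_file_offset = 0;                   // offset in the cache file
 *	m.sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE;
 *	m.sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE;
 *
 *	if (shared_region_map_np(cache_fd, 1, &m) != 0) {
 *		// fall back to mapping the libraries individually
 *	}
 */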
1277
1278/*
1279 * Enter the appropriate shared region into "map" for "task".
1280 * This involves looking up the shared region (and possibly creating a new
1281 * one) for the desired environment, then mapping the VM sub map into the
1282 * task's VM "map", with the appropriate level of pmap-nesting.
1283 */
1284kern_return_t
1285vm_shared_region_enter(
1286 struct _vm_map *map,
1287 struct task *task,
1288 void *fsroot,
1289 cpu_type_t cpu)
1290{
1291 kern_return_t kr;
1292 vm_shared_region_t shared_region;
1293 vm_map_offset_t sr_address, sr_offset, target_address;
1294 vm_map_size_t sr_size, mapping_size;
1295 vm_map_offset_t sr_pmap_nesting_start;
1296 vm_map_size_t sr_pmap_nesting_size;
1297 ipc_port_t sr_handle;
1298 boolean_t is_64bit;
1299
1300 is_64bit = task_has_64BitAddr(task);
1301
1302 SHARED_REGION_TRACE_DEBUG(
1303 ("shared_region: -> "
1304 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n",
1305 map, task, fsroot, cpu, is_64bit));
1306
1307 /* lookup (create if needed) the shared region for this environment */
1308 shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
1309 if (shared_region == NULL) {
1310 /* this should not happen ! */
1311 SHARED_REGION_TRACE_ERROR(
1312 ("shared_region: -> "
1313 "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): "
1314 "lookup failed !\n",
1315 map, task, fsroot, cpu, is_64bit));
1316 //panic("shared_region_enter: lookup failed\n");
1317 return KERN_FAILURE;
1318 }
1319
1320 /* let the task use that shared region */
1321 vm_shared_region_set(task, shared_region);
1322
1323 kr = KERN_SUCCESS;
1324 /* no need to lock since this data is never modified */
1325 sr_address = shared_region->sr_base_address;
1326 sr_size = shared_region->sr_size;
1327 sr_handle = shared_region->sr_mem_entry;
1328 sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
1329 sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;
1330
1331 /*
1332 * Start mapping the shared region's VM sub map into the task's VM map.
1333 */
1334 sr_offset = 0;
1335
1336 if (sr_pmap_nesting_start > sr_address) {
1337 /* we need to map a range without pmap-nesting first */
1338 target_address = sr_address;
1339 mapping_size = sr_pmap_nesting_start - sr_address;
1340 kr = vm_map_enter_mem_object(
1341 map,
1342 &target_address,
1343 mapping_size,
1344 0,
1345 VM_FLAGS_FIXED,
1346 sr_handle,
1347 sr_offset,
1348 TRUE,
1349 VM_PROT_READ,
1350 VM_PROT_ALL,
1351 VM_INHERIT_SHARE);
1352 if (kr != KERN_SUCCESS) {
1353 SHARED_REGION_TRACE_ERROR(
1354 ("shared_region: enter(%p,%p,%p,%d,%d): "
1355 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1356 map, task, fsroot, cpu, is_64bit,
1357 (long long)target_address,
1358 (long long)mapping_size, sr_handle, kr));
1359 goto done;
1360 }
1361 SHARED_REGION_TRACE_DEBUG(
1362 ("shared_region: enter(%p,%p,%p,%d,%d): "
1363 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1364 map, task, fsroot, cpu, is_64bit,
1365 (long long)target_address, (long long)mapping_size,
1366 sr_handle, kr));
1367 sr_offset += mapping_size;
1368 sr_size -= mapping_size;
1369 }
1370 /*
1371 * We may need to map several pmap-nested portions, due to platform
1372 * specific restrictions on pmap nesting.
1373 * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
1374 */
1375 for (;
1376 sr_pmap_nesting_size > 0;
1377 sr_offset += mapping_size,
1378 sr_size -= mapping_size,
1379 sr_pmap_nesting_size -= mapping_size) {
1380 target_address = sr_address + sr_offset;
1381 mapping_size = sr_pmap_nesting_size;
1382 if (mapping_size > pmap_nesting_size_max) {
1383 mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
1384 }
1385 kr = vm_map_enter_mem_object(
1386 map,
1387 &target_address,
1388 mapping_size,
1389 0,
1390 (VM_FLAGS_FIXED | VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP)),
1391 sr_handle,
1392 sr_offset,
1393 TRUE,
1394 VM_PROT_READ,
1395 VM_PROT_ALL,
1396 VM_INHERIT_SHARE);
1397 if (kr != KERN_SUCCESS) {
1398 SHARED_REGION_TRACE_ERROR(
1399 ("shared_region: enter(%p,%p,%p,%d,%d): "
1400 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1401 map, task, fsroot, cpu, is_64bit,
1402 (long long)target_address,
1403 (long long)mapping_size, sr_handle, kr));
1404 goto done;
1405 }
1406 SHARED_REGION_TRACE_DEBUG(
1407 ("shared_region: enter(%p,%p,%p,%d,%d): "
1408 "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1409 map, task, fsroot, cpu, is_64bit,
1410 (long long)target_address, (long long)mapping_size,
1411 sr_handle, kr));
1412 }
1413 if (sr_size > 0) {
1414 /* and there's some left to be mapped without pmap-nesting */
1415 target_address = sr_address + sr_offset;
1416 mapping_size = sr_size;
1417 kr = vm_map_enter_mem_object(
1418 map,
1419 &target_address,
1420 mapping_size,
1421 0,
1422 VM_FLAGS_FIXED,
1423 sr_handle,
1424 sr_offset,
1425 TRUE,
1426 VM_PROT_READ,
1427 VM_PROT_ALL,
1428 VM_INHERIT_SHARE);
1429 if (kr != KERN_SUCCESS) {
1430 SHARED_REGION_TRACE_ERROR(
1431 ("shared_region: enter(%p,%p,%p,%d,%d): "
1432 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1433 map, task, fsroot, cpu, is_64bit,
1434 (long long)target_address,
1435 (long long)mapping_size, sr_handle, kr));
1436 goto done;
1437 }
1438 SHARED_REGION_TRACE_DEBUG(
1439 ("shared_region: enter(%p,%p,%p,%d,%d): "
1440 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
1441 map, task, fsroot, cpu, is_64bit,
1442 (long long)target_address, (long long)mapping_size,
1443 sr_handle, kr));
1444 sr_offset += mapping_size;
1445 sr_size -= mapping_size;
1446 }
1447 assert(sr_size == 0);
1448
1449done:
1450 SHARED_REGION_TRACE_DEBUG(
1451 ("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n",
1452 map, task, fsroot, cpu, is_64bit, kr));
1453 return kr;
1454}
1455
1456#define SANE_SLIDE_INFO_SIZE (1024*1024) /*Can be changed if needed*/
1457struct vm_shared_region_slide_info slide_info;
1458
1459kern_return_t
1460vm_shared_region_sliding_valid(uint32_t slide)
1461{
1462 kern_return_t kr = KERN_SUCCESS;
1463 vm_shared_region_t sr = vm_shared_region_get(current_task());
1464
1465 /* No region yet? we're fine. */
1466 if (sr == NULL) {
1467 return kr;
1468 }
1469
1470 if ((sr->sr_slid == TRUE) && slide) {
1471 if (slide != vm_shared_region_get_slide_info(sr)->slide) {
1472 printf("Only one shared region can be slid\n");
1473 kr = KERN_FAILURE;
1474 } else {
1475 /*
1476 * Request for sliding when we've
1477 * already done it with exactly the
1478 * same slide value before.
1479 * This isn't wrong technically but
1480 * we don't want to slide again and
1481 * so we return this value.
1482 */
1483 kr = KERN_INVALID_ARGUMENT;
1484 }
1485 }
1486 vm_shared_region_deallocate(sr);
1487 return kr;
1488}
1489
1490kern_return_t
1491vm_shared_region_slide_init(
1492 vm_shared_region_t sr,
1493 mach_vm_size_t slide_info_size,
1494 mach_vm_offset_t start,
1495 mach_vm_size_t size,
1496 uint32_t slide,
1497 memory_object_control_t sr_file_control)
1498{
1499 kern_return_t kr = KERN_SUCCESS;
1500 vm_object_t object = VM_OBJECT_NULL;
1501 vm_object_offset_t offset = 0;
1502 vm_shared_region_slide_info_t si = vm_shared_region_get_slide_info(sr);
1503 vm_offset_t slide_info_entry;
1504
1505 vm_map_t map = NULL, cur_map = NULL;
1506 boolean_t is_map_locked = FALSE;
1507
1508 assert(sr->sr_slide_in_progress);
1509 assert(!sr->sr_slid);
1510 assert(si->slide_object == NULL);
1511 assert(si->slide_info_entry == NULL);
1512
1513 if (slide_info_size > SANE_SLIDE_INFO_SIZE) {
1514 printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size);
1515 kr = KERN_FAILURE;
1516 return kr;
1517 }
1518
1519 kr = kmem_alloc(kernel_map,
1520 (vm_offset_t *) &slide_info_entry,
1521 (vm_size_t) slide_info_size);
1522 if (kr != KERN_SUCCESS) {
1523 return kr;
1524 }
1525
1526 if (sr_file_control != MEMORY_OBJECT_CONTROL_NULL) {
1527
1528 object = memory_object_control_to_vm_object(sr_file_control);
1529 vm_object_reference(object);
1530 offset = start;
1531
1532 vm_object_lock(object);
1533 } else {
1534 /*
1535 * Remove this entire "else" block and all "map" references
1536 * once we get rid of the shared_region_slide_np()
1537 * system call.
1538 */
1539 vm_map_entry_t entry = VM_MAP_ENTRY_NULL;
1540 map = current_map();
1541 vm_map_lock_read(map);
1542 is_map_locked = TRUE;
1543 Retry:
1544 cur_map = map;
1545 if(!vm_map_lookup_entry(map, start, &entry)) {
1546 kr = KERN_INVALID_ARGUMENT;
1547 } else {
1548 vm_object_t shadow_obj = VM_OBJECT_NULL;
1549
1550 if (entry->is_sub_map == TRUE) {
1551 map = entry->object.sub_map;
1552 start -= entry->vme_start;
1553 start += entry->offset;
1554 vm_map_lock_read(map);
1555 vm_map_unlock_read(cur_map);
1556 goto Retry;
1557 } else {
1558 object = entry->object.vm_object;
1559 offset = (start - entry->vme_start) + entry->offset;
1560 }
1561
1562 vm_object_lock(object);
1563 while (object->shadow != VM_OBJECT_NULL) {
1564 shadow_obj = object->shadow;
1565 vm_object_lock(shadow_obj);
1566 vm_object_unlock(object);
1567 object = shadow_obj;
1568 }
1569 }
1570 }
1571
1572 if (object->internal == TRUE) {
1573 kr = KERN_INVALID_ADDRESS;
1574 } else if (object->object_slid) {
1575 /* Can only be slid once */
1576 printf("%s: found vm_object %p already slid?\n", __FUNCTION__, object);
1577 kr = KERN_FAILURE;
1578 } else {
1579
1580 si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry;
1581 si->slide_info_size = slide_info_size;
1582 si->slide_object = object;
1583 si->start = offset;
1584 si->end = si->start + size;
1585 si->slide = slide;
1586
1587 /*
1588 * If we want to have this region get deallocated/freed
1589 * then we will have to make sure that we msync(..MS_INVALIDATE..)
1590 * the pages associated with this shared region. Those pages would
1591 * have been slid with an older slide value.
1592 */
1593
1594 /*
1595 * Pointers in object are held without references; they
1596 * are disconnected at the time that we destroy the
1597 * shared region, and since the shared region holds
1598 * a reference on the object, no references in the other
1599 * direction are required.
1600 */
1601 object->object_slid = TRUE;
1602 object->vo_slide_info = si;
1603 }
1604
1605 vm_object_unlock(object);
1606 if (is_map_locked == TRUE) {
1607 vm_map_unlock_read(map);
1608 }
1609
1610 if (kr != KERN_SUCCESS) {
1611 kmem_free(kernel_map, slide_info_entry, slide_info_size);
1612 }
1613 return kr;
1614}
1615
1616void*
1617vm_shared_region_get_slide_info_entry(vm_shared_region_t sr) {
1618 return (void*)sr->sr_slide_info.slide_info_entry;
1619}
1620
1621
1622kern_return_t
1623vm_shared_region_slide_sanity_check(vm_shared_region_t sr)
1624{
1625 uint32_t pageIndex=0;
1626 uint16_t entryIndex=0;
1627 uint16_t *toc = NULL;
1628 vm_shared_region_slide_info_t si;
1629 vm_shared_region_slide_info_entry_t s_info;
1630 kern_return_t kr;
1631
1632 si = vm_shared_region_get_slide_info(sr);
1633 s_info = si->slide_info_entry;
1634 toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
1635
1636 kr = mach_vm_protect(kernel_map,
1637 (mach_vm_offset_t)(vm_offset_t)s_info,
1638 (mach_vm_size_t) si->slide_info_size,
1639 TRUE, VM_PROT_READ);
1640 if (kr != KERN_SUCCESS) {
1641 panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr);
1642 }
1643
1644 for (;pageIndex < s_info->toc_count; pageIndex++) {
1645
1646 entryIndex = (uint16_t)(toc[pageIndex]);
1647
1648 if (entryIndex >= s_info->entry_count) {
1649 printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
1650 goto fail;
1651 }
1652
1653 }
1654 return KERN_SUCCESS;
1655fail:
1656 if (si->slide_info_entry != NULL) {
1657 kmem_free(kernel_map,
1658 (vm_offset_t) si->slide_info_entry,
1659 (vm_size_t) si->slide_info_size);
1660
1661 vm_object_lock(si->slide_object);
1662 si->slide_object->object_slid = FALSE;
1663 si->slide_object->vo_slide_info = NULL;
1664 vm_object_unlock(si->slide_object);
1665
1666 vm_object_deallocate(si->slide_object);
1667 si->slide_object = NULL;
1668 si->start = 0;
1669 si->end = 0;
1670 si->slide = 0;
1671 si->slide_info_entry = NULL;
1672 si->slide_info_size = 0;
1673 }
1674 return KERN_FAILURE;
1675}
1676
1677kern_return_t
1678vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex)
1679{
1680 uint16_t *toc = NULL;
1681 slide_info_entry_toc_t bitmap = NULL;
1682 uint32_t i=0, j=0;
1683 uint8_t b = 0;
1684 uint32_t slide = si->slide;
1685 int is_64 = task_has_64BitAddr(current_task());
1686
1687 vm_shared_region_slide_info_entry_t s_info = si->slide_info_entry;
1688 toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset);
1689
1690 if (pageIndex >= s_info->toc_count) {
1691 printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count);
1692 } else {
1693 uint16_t entryIndex = (uint16_t)(toc[pageIndex]);
1694 slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset);
1695
1696 if (entryIndex >= s_info->entry_count) {
1697 printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count);
1698 } else {
1699 bitmap = &slide_info_entries[entryIndex];
1700
1701 for(i=0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) {
1702 b = bitmap->entry[i];
1703 if (b!=0) {
1704 for (j=0; j <8; ++j) {
1705 if (b & (1 <<j)){
1706 uint32_t *ptr_to_slide;
1707 uint32_t old_value;
1708
1709 ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr)+(sizeof(uint32_t)*(i*8 +j)));
1710 old_value = *ptr_to_slide;
1711 *ptr_to_slide += slide;
1712 if (is_64 && *ptr_to_slide < old_value) {
1713 /*
1714 * We just slid the low 32 bits of a 64-bit pointer
1715 * and it looks like there should have been a carry-over
1716 * to the upper 32 bits.
1717 * The sliding failed...
1718 */
1719 printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n",
1720 i, j, b, slide, old_value, *ptr_to_slide);
1721 return KERN_FAILURE;
1722 }
1723 }
1724 }
1725 }
1726 }
1727 }
1728 }
1729
1730 return KERN_SUCCESS;
1731}
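/*
 * Worked example (illustrative): in the loop above, bit j of bitmap byte i
 * covers the 32-bit word at page offset sizeof(uint32_t) * (i*8 + j).
 * So i == 2, j == 5 selects the word at offset 4 * (2*8 + 5) = 84 within
 * the page being slid; that word gets "slide" added to it, with the
 * carry-over check catching the case where the low half of a 64-bit
 * pointer wraps.
 */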
1732
1733/******************************************************************************/
1734/* Comm page support */
1735/******************************************************************************/
1736
1737ipc_port_t commpage32_handle = IPC_PORT_NULL;
1738ipc_port_t commpage64_handle = IPC_PORT_NULL;
1739vm_named_entry_t commpage32_entry = NULL;
1740vm_named_entry_t commpage64_entry = NULL;
1741vm_map_t commpage32_map = VM_MAP_NULL;
1742vm_map_t commpage64_map = VM_MAP_NULL;
1743
1744ipc_port_t commpage_text32_handle = IPC_PORT_NULL;
1745ipc_port_t commpage_text64_handle = IPC_PORT_NULL;
1746vm_named_entry_t commpage_text32_entry = NULL;
1747vm_named_entry_t commpage_text64_entry = NULL;
1748vm_map_t commpage_text32_map = VM_MAP_NULL;
1749vm_map_t commpage_text64_map = VM_MAP_NULL;
1750
1751user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START;
1752user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START;
1753
1754#if defined(__i386__) || defined(__x86_64__)
2d21ac55
A
1755/*
1756 * Create a memory entry, VM submap and pmap for one commpage.
1757 */
1758static void
1759_vm_commpage_init(
1760 ipc_port_t *handlep,
1761 vm_map_size_t size)
1762{
1763 kern_return_t kr;
1764 vm_named_entry_t mem_entry;
1765 vm_map_t new_map;
1766
1767 SHARED_REGION_TRACE_DEBUG(
1768 ("commpage: -> _init(0x%llx)\n",
1769 (long long)size));
1770
1771 kr = mach_memory_entry_allocate(&mem_entry,
1772 handlep);
1773 if (kr != KERN_SUCCESS) {
1774 panic("_vm_commpage_init: could not allocate mem_entry");
1775 }
316670eb 1776 new_map = vm_map_create(pmap_create(NULL, 0, FALSE), 0, size, TRUE);
2d21ac55
A
1777 if (new_map == VM_MAP_NULL) {
1778 panic("_vm_commpage_init: could not allocate VM map");
1779 }
1780 mem_entry->backing.map = new_map;
1781 mem_entry->internal = TRUE;
1782 mem_entry->is_sub_map = TRUE;
1783 mem_entry->offset = 0;
1784 mem_entry->protection = VM_PROT_ALL;
1785 mem_entry->size = size;
1786
1787 SHARED_REGION_TRACE_DEBUG(
1788 ("commpage: _init(0x%llx) <- %p\n",
1789 (long long)size, *handlep));
1790}
316670eb
A
1791#endif
1792
1793
1794/*
1795 * Initialize the comm text pages at boot time.
1796 */
1797extern u_int32_t random(void);
1798void
1799vm_commpage_text_init(void)
1800{
1801 SHARED_REGION_TRACE_DEBUG(
1802 ("commpage text: ->init()\n"));
1803#if defined(__i386__) || defined(__x86_64__)
1804 /* create the 32-bit comm text page */
1805 unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* page-aligned slide, restricted to the 32-bit max minus two pages */
1806 _vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
1807 commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject;
1808 commpage_text32_map = commpage_text32_entry->backing.map;
1809 commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset);
1810 /* XXX if (cpu_is_64bit_capable()) ? */
1811 /* create the 64-bit comm text page */
1812 offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* page-aligned slide, restricted to a 2MB range */
1813 _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH);
1814 commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject;
1815 commpage_text64_map = commpage_text64_entry->backing.map;
1816 commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset);
1817
1818 commpage_text_populate();
1819#else
1820#error Unknown architecture.
1821#endif /* __i386__ || __x86_64__ */
1822 /* populate the routines in here */
1823 SHARED_REGION_TRACE_DEBUG(
1824 ("commpage text: init() <-\n"));
1825
1826}
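
/*
 * Illustrative sketch (not part of the original source): the text commpage
 * slide chosen above is always a whole number of pages, picked uniformly
 * from the _PFZ32_SLIDE_RANGE page slots, so the 32-bit text page lands at
 * _COMM_PAGE32_TEXT_START + slot * PAGE_SIZE.  The helper name is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static user32_addr_t
example_text32_location(void)
{
	unsigned int slot = random() % _PFZ32_SLIDE_RANGE;	/* page slot, 0 .. range-1 */

	return (user32_addr_t)(_COMM_PAGE32_TEXT_START + (slot << PAGE_SHIFT));
}
#endif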
2d21ac55
A
1827
1828/*
1829 * Initialize the comm pages at boot time.
1830 */
1831void
1832vm_commpage_init(void)
1833{
1834 SHARED_REGION_TRACE_DEBUG(
1835 ("commpage: -> init()\n"));
1836
316670eb 1837#if defined(__i386__) || defined(__x86_64__)
2d21ac55
A
1838 /* create the 32-bit comm page */
1839 _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
1840 commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
1841 commpage32_map = commpage32_entry->backing.map;
1842
1843 /* XXX if (cpu_is_64bit_capable()) ? */
1844 /* create the 64-bit comm page */
1845 _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
1846 commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
1847 commpage64_map = commpage64_entry->backing.map;
1848
316670eb
A
1849#endif /* __i386__ || __x86_64__ */
1850
2d21ac55
A
1851 /* populate them according to this specific platform */
1852 commpage_populate();
b0d623f7
A
1853 __commpage_setup = 1;
1854#if defined(__i386__) || defined(__x86_64__)
1855 if (__system_power_source == 0) {
1856 post_sys_powersource_internal(0, 1);
1857 }
1858#endif /* __i386__ || __x86_64__ */
2d21ac55
A
1859
1860 SHARED_REGION_TRACE_DEBUG(
1861 ("commpage: init() <-\n"));
1862}
1863
1864/*
1865 * Enter the appropriate comm page into the task's address space.
1866 * This is called at exec() time via vm_map_exec().
1867 */
1868kern_return_t
1869vm_commpage_enter(
1870 vm_map_t map,
1871 task_t task)
1872{
316670eb
A
1873 ipc_port_t commpage_handle, commpage_text_handle;
1874 vm_map_offset_t commpage_address, objc_address, commpage_text_address;
1875 vm_map_size_t commpage_size, objc_size, commpage_text_size;
2d21ac55
A
1876 int vm_flags;
1877 kern_return_t kr;
1878
1879 SHARED_REGION_TRACE_DEBUG(
1880 ("commpage: -> enter(%p,%p)\n",
1881 map, task));
1882
316670eb 1883 commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH;
2d21ac55
A
1884 /* the comm page is likely to be beyond the actual end of the VM map */
1885 vm_flags = VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX;
1886
1887 /* select the appropriate comm page for this task */
1888 assert(! (task_has_64BitAddr(task) ^ vm_map_is_64bit(map)));
1889 if (task_has_64BitAddr(task)) {
2d21ac55
A
1890 commpage_handle = commpage64_handle;
1891 commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
1892 commpage_size = _COMM_PAGE64_AREA_LENGTH;
1893 objc_size = _COMM_PAGE64_OBJC_SIZE;
1894 objc_address = _COMM_PAGE64_OBJC_BASE;
316670eb
A
1895 commpage_text_handle = commpage_text64_handle;
1896 commpage_text_address = (vm_map_offset_t) commpage_text64_location;
2d21ac55
A
1897 } else {
1898 commpage_handle = commpage32_handle;
1899 commpage_address =
1900 (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
1901 commpage_size = _COMM_PAGE32_AREA_LENGTH;
1902 objc_size = _COMM_PAGE32_OBJC_SIZE;
1903 objc_address = _COMM_PAGE32_OBJC_BASE;
316670eb
A
1904 commpage_text_handle = commpage_text32_handle;
1905 commpage_text_address = (vm_map_offset_t) commpage_text32_location;
2d21ac55
A
1906 }
1907
1908 if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
1909 (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
1910 /* the commpage is properly aligned and sized for pmap-nesting */
1911 vm_flags |= VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP);
1912 }
2d21ac55
A
1913 /* map the comm page in the task's address space */
1914 assert(commpage_handle != IPC_PORT_NULL);
1915 kr = vm_map_enter_mem_object(
1916 map,
1917 &commpage_address,
1918 commpage_size,
1919 0,
1920 vm_flags,
1921 commpage_handle,
1922 0,
1923 FALSE,
316670eb
A
1924 VM_PROT_READ,
1925 VM_PROT_READ,
2d21ac55
A
1926 VM_INHERIT_SHARE);
1927 if (kr != KERN_SUCCESS) {
1928 SHARED_REGION_TRACE_ERROR(
1929 ("commpage: enter(%p,0x%llx,0x%llx) "
1930 "commpage %p mapping failed 0x%x\n",
1931 map, (long long)commpage_address,
1932 (long long)commpage_size, commpage_handle, kr));
1933 }
1934
316670eb
A
1935 /* map the comm text page in the task's address space */
1936 assert(commpage_text_handle != IPC_PORT_NULL);
1937 kr = vm_map_enter_mem_object(
1938 map,
1939 &commpage_text_address,
1940 commpage_text_size,
1941 0,
1942 vm_flags,
1943 commpage_text_handle,
1944 0,
1945 FALSE,
1946 VM_PROT_READ|VM_PROT_EXECUTE,
1947 VM_PROT_READ|VM_PROT_EXECUTE,
1948 VM_INHERIT_SHARE);
1949 if (kr != KERN_SUCCESS) {
1950 SHARED_REGION_TRACE_ERROR(
1951 ("commpage text: enter(%p,0x%llx,0x%llx) "
1952 "commpage text %p mapping failed 0x%x\n",
1953 map, (long long)commpage_text_address,
1954 (long long)commpage_text_size, commpage_text_handle, kr));
1955 }
1956
2d21ac55
A
1957 /*
1958 * Since we're here, we also pre-allocate some virtual space for the
1959 * Objective-C run-time, if needed...
1960 */
1961 if (objc_size != 0) {
1962 kr = vm_map_enter_mem_object(
1963 map,
1964 &objc_address,
1965 objc_size,
1966 0,
1967 VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX,
1968 IPC_PORT_NULL,
1969 0,
1970 FALSE,
1971 VM_PROT_ALL,
1972 VM_PROT_ALL,
1973 VM_INHERIT_DEFAULT);
1974 if (kr != KERN_SUCCESS) {
1975 SHARED_REGION_TRACE_ERROR(
1976 ("commpage: enter(%p,0x%llx,0x%llx) "
1977 "objc mapping failed 0x%x\n",
1978 map, (long long)objc_address,
1979 (long long)objc_size, kr));
1980 }
1981 }
1982
1983 SHARED_REGION_TRACE_DEBUG(
1984 ("commpage: enter(%p,%p) <- 0x%x\n",
1985 map, task, kr));
1986 return kr;
1987}
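
/*
 * Illustrative sketch (not part of the original source): vm_map_exec()
 * makes a call of this shape at exec() time, once the new task's map has
 * been set up.  Whether the 32-bit or 64-bit commpage (and commpage text
 * page) gets mapped is decided inside vm_commpage_enter() from the task's
 * address-space size.  The wrapper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static kern_return_t
example_enter_commpage(vm_map_t new_map, task_t new_task)
{
	kern_return_t kr;

	kr = vm_commpage_enter(new_map, new_task);
	if (kr != KERN_SUCCESS) {
		/* the failure has already been traced by vm_commpage_enter() */
		printf("example_enter_commpage: commpage mapping failed 0x%x\n", kr);
	}
	return kr;
}
#endif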
b0d623f7 1988
39236c6e
A
1989int
1990vm_shared_region_slide(uint32_t slide,
1991 mach_vm_offset_t entry_start_address,
1992 mach_vm_size_t entry_size,
1993 mach_vm_offset_t slide_start,
1994 mach_vm_size_t slide_size,
1995 memory_object_control_t sr_file_control)
1996{
1997 void *slide_info_entry = NULL;
1998 int error;
1999 vm_shared_region_t sr;
2000
2001 SHARED_REGION_TRACE_DEBUG(
2002 ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n",
2003 slide, entry_start_address, entry_size, slide_start, slide_size));
2004
2005 sr = vm_shared_region_get(current_task());
2006 if (sr == NULL) {
2007 printf("%s: no shared region?\n", __FUNCTION__);
2008 SHARED_REGION_TRACE_DEBUG(
2009 ("vm_shared_region_slide: <- %d (no shared region)\n",
2010 KERN_FAILURE));
2011 return KERN_FAILURE;
2012 }
2013
2014 /*
2015 * Protect from concurrent access.
2016 */
2017 vm_shared_region_lock();
2018 while(sr->sr_slide_in_progress) {
2019 vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
2020 }
2021 if (sr->sr_slid
2022 || shared_region_completed_slide
2023 ) {
2024 vm_shared_region_unlock();
2025
2026 vm_shared_region_deallocate(sr);
2027 printf("%s: shared region already slid?\n", __FUNCTION__);
2028 SHARED_REGION_TRACE_DEBUG(
2029 ("vm_shared_region_slide: <- %d (already slid)\n",
2030 KERN_FAILURE));
2031 return KERN_FAILURE;
2032 }
2033
2034 sr->sr_slide_in_progress = TRUE;
2035 vm_shared_region_unlock();
2036
2037 if((error = vm_shared_region_slide_init(sr, slide_size, entry_start_address, entry_size, slide, sr_file_control))) {
2038 printf("slide_info initialization failed with kr=%d\n", error);
2039 goto done;
2040 }
2041
2042 slide_info_entry = vm_shared_region_get_slide_info_entry(sr);
2043 if (slide_info_entry == NULL){
2044 error = KERN_FAILURE;
2045 } else {
2046 error = copyin((user_addr_t)slide_start,
2047 slide_info_entry,
2048 (vm_size_t)slide_size);
2049 if (error) {
2050 error = KERN_INVALID_ADDRESS;
2051 }
2052 }
2053 if (error) {
2054 goto done;
2055 }
2056
2057 if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) {
2058 error = KERN_INVALID_ARGUMENT;
2059 printf("Sanity Check failed for slide_info\n");
2060 } else {
2061#if DEBUG
2062 printf("Succesfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n",
2063 (void*)(uintptr_t)entry_start_address,
2064 (unsigned long)entry_size,
2065 (unsigned long)slide_size);
2066#endif
2067 }
2068done:
2069 vm_shared_region_lock();
2070
2071 assert(sr->sr_slide_in_progress);
2072 assert(sr->sr_slid == FALSE);
2073 sr->sr_slide_in_progress = FALSE;
2074 thread_wakeup(&sr->sr_slide_in_progress);
2075
2076 if (error == KERN_SUCCESS) {
2077 sr->sr_slid = TRUE;
2078
2079 /*
2080 * We don't know how to tear down a slid shared region today, because
2081 * we would have to invalidate all the pages that have been slid
2082 * atomically with respect to anyone mapping the shared region afresh.
2083 * Therefore, take a dangling reference to prevent teardown.
2084 */
2085 sr->sr_ref_count++;
2086 shared_region_completed_slide = TRUE;
2087 }
2088 vm_shared_region_unlock();
2089
2090 vm_shared_region_deallocate(sr);
2091
2092 SHARED_REGION_TRACE_DEBUG(
2093 ("vm_shared_region_slide: <- %d\n",
2094 error));
2095
2096 return error;
2097}
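
/*
 * Illustrative sketch (not part of the original source): the serialization
 * protocol used by vm_shared_region_slide() above, condensed.  Exactly one
 * thread at a time installs the slide info; other threads block on
 * sr_slide_in_progress and bail out once sr_slid is set.  The function name
 * is hypothetical and the middle step is elided.
 */
#if 0	/* example only, never compiled */
static kern_return_t
example_serialize_slide(vm_shared_region_t sr)
{
	kern_return_t error = KERN_SUCCESS;

	vm_shared_region_lock();
	while (sr->sr_slide_in_progress) {
		/* another thread is installing slide info; wait for its wakeup */
		vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT);
	}
	if (sr->sr_slid) {
		vm_shared_region_unlock();
		return KERN_FAILURE;		/* already slid, nothing to do */
	}
	sr->sr_slide_in_progress = TRUE;	/* this thread now owns the slide */
	vm_shared_region_unlock();

	/* ... copyin() the slide info and sanity-check it, lock dropped ... */

	vm_shared_region_lock();
	sr->sr_slide_in_progress = FALSE;
	thread_wakeup(&sr->sr_slide_in_progress);	/* release any waiters */
	if (error == KERN_SUCCESS) {
		sr->sr_slid = TRUE;
	}
	vm_shared_region_unlock();
	return error;
}
#endif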
b0d623f7
A
2098
2099/*
2100 * This is called from power management code to let the kernel know the current power source:
2101 * 0 if it is an external source (connected to power),
2102 * 1 if it is an internal power source, i.e. battery.
2103 */
2104void
2105#if defined(__i386__) || defined(__x86_64__)
2106post_sys_powersource(int i)
2107#else
2108post_sys_powersource(__unused int i)
2109#endif
2110{
2111#if defined(__i386__) || defined(__x86_64__)
2112 post_sys_powersource_internal(i, 0);
2113#endif /* __i386__ || __x86_64__ */
2114}
2115
2116
2117#if defined(__i386__) || defined(__x86_64__)
2118static void
2119post_sys_powersource_internal(int i, int internal)
2120{
2121 if (internal == 0)
2122 __system_power_source = i;
2123
2124 if (__commpage_setup != 0) {
2125 if (__system_power_source != 0)
2126 commpage_set_spin_count(0);
2127 else
2128 commpage_set_spin_count(MP_SPIN_TRIES);
2129 }
2130}
2131#endif /* __i386__ || __x86_64__ */
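
/*
 * Illustrative sketch (not part of the original source): effect of the
 * notification above once the commpage has been set up.
 */
#if 0	/* example only, never compiled */
	post_sys_powersource(1);	/* on battery: commpage spin count set to 0 */
	post_sys_powersource(0);	/* on external power: spin count restored to MP_SPIN_TRIES */
#endif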
39236c6e 2132