/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * Shared region (... and comm page)
 *
 * This file handles the VM shared region and comm page.
 *
 */
/*
 * SHARED REGIONS
 * --------------
 *
 * A shared region is a submap that contains the most common system shared
 * libraries for a given environment.
 * An environment is defined by (cpu-type, 64-bitness, root directory).
 *
 * The point of a shared region is to reduce the setup overhead when exec'ing
 * a new process.
 * A shared region uses a shared VM submap that gets mapped automatically
 * at exec() time (see vm_map_exec()). The first process of a given
 * environment sets up the shared region and all further processes in that
 * environment can re-use that shared region without having to re-create
 * the same mappings in their VM map. All they need is contained in the shared
 * region.
 * It can also share a pmap (mostly for read-only parts but also for the
 * initial version of some writable parts), which gets "nested" into the
 * process's pmap. This reduces the number of soft faults: once one process
 * brings in a page in the shared region, all the other processes can access
 * it without having to enter it in their own pmap.
 *
 * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter()
 * to map the appropriate shared region in the process's address space.
 * We look up the appropriate shared region for the process's environment.
 * If we can't find one, we create a new (empty) one and add it to the list.
 * Otherwise, we just take an extra reference on the shared region we found.
 *
 * The "dyld" runtime (mapped into the process's address space at exec() time)
 * will then use the shared_region_check_np() and shared_region_map_np()
 * system calls to validate and/or populate the shared region with the
 * appropriate dyld_shared_cache file.
 *
 * The shared region is inherited on fork() and the child simply takes an
 * extra reference on its parent's shared region.
 *
 * When the task terminates, we release a reference on its shared region.
 * When the last reference is released, we destroy the shared region.
 *
 * After a chroot(), the calling process keeps using its original shared region,
 * since that's what was mapped when it was started. But its children
 * will use a different shared region, because they need to use the shared
 * cache that's relative to the new root directory.
 */
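
/*
 * For illustration only (not original to this file): a rough sketch of how
 * a user-space runtime like dyld drives this interface.  The prototypes
 * below are assumptions based on the BSD system call layer, and all error
 * handling is elided:
 *
 *    extern int shared_region_check_np(uint64_t *start_address);
 *    extern int shared_region_map_np(int fd, uint32_t count,
 *        const struct shared_file_mapping_np mappings[]);
 *
 *    uint64_t start;
 *    if (shared_region_check_np(&start) == 0) {
 *        // the shared region is already populated: use the
 *        // dyld_shared_cache that is mapped at "start"
 *    } else {
 *        // first process in this environment: open the
 *        // dyld_shared_cache file and populate the region with
 *        // shared_region_map_np(fd, count, mappings)
 *    }
 */
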
/*
 * COMM PAGE
 *
 * A "comm page" is an area of memory that is populated by the kernel with
 * the appropriate platform-specific version of some commonly used code.
 * There is one "comm page" per platform (cpu-type, 64-bitness) but only
 * for the native cpu-type. No need to overly optimize translated code
 * for hardware that is not really there!
 *
 * The comm pages are created and populated at boot time.
 *
 * The appropriate comm page is mapped into a process's address space
 * at exec() time, in vm_map_exec().
 * It is then inherited on fork().
 *
 * The comm page is shared between the kernel and all applications of
 * a given platform. Only the kernel can modify it.
 *
 * Applications just branch to fixed addresses in the comm page and find
 * the right version of the code for the platform. There is also some
 * data provided and updated by the kernel for processes to retrieve easily
 * without having to do a system call.
 */
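
/*
 * For illustration only (not original to this file): a minimal user-space
 * sketch of reading kernel-updated data out of the comm page.  This assumes
 * the _COMM_PAGE_CPU_CAPABILITIES address and the kNumCPUs/kNumCPUsShift
 * bit-field constants from <machine/cpu_capabilities.h>; the exact set of
 * comm page fields is platform-specific:
 *
 *    #include <machine/cpu_capabilities.h>
 *
 *    // a fixed address in every process; no system call needed
 *    uint32_t caps = *(volatile uint32_t *)_COMM_PAGE_CPU_CAPABILITIES;
 *    int ncpus = (caps & kNumCPUs) >> kNumCPUsShift;
 */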

#include <debug.h>

#include <kern/ipc_tt.h>
#include <kern/kalloc.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <machine/commpage.h>
#include <machine/cpu_capabilities.h>

/* "dyld" uses this to figure out what the kernel supports */
int shared_region_version = 3;

/* should local (non-chroot) shared regions persist when no task uses them? */
int shared_region_persistence = 1;  /* yes by default */

/* trace level, output is sent to the system log file */
int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL;

/* this lock protects all the shared region data structures */
lck_grp_t *vm_shared_region_lck_grp;
lck_mtx_t vm_shared_region_lock;

#define vm_shared_region_lock()   lck_mtx_lock(&vm_shared_region_lock)
#define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock)
#define vm_shared_region_sleep(event, interruptible)    \
    lck_mtx_sleep(&vm_shared_region_lock,               \
                  LCK_SLEEP_DEFAULT,                    \
                  (event_t) (event),                    \
                  (interruptible))
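
/*
 * A note on the macros above (illustrative, not original to this file):
 * with LCK_SLEEP_DEFAULT, vm_shared_region_sleep() atomically drops the
 * shared region lock and blocks on "event", and re-acquires the lock
 * before returning.  The typical pattern, used by the mapping code later
 * in this file, is:
 *
 *    vm_shared_region_lock();
 *    while (shared_region->sr_mapping_in_progress) {
 *        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
 *                               THREAD_UNINT);
 *    }
 *    ... work with a consistent view of the shared region ...
 *    vm_shared_region_unlock();
 */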

/* the list of currently available shared regions (one per environment) */
queue_head_t vm_shared_region_queue;

static void vm_shared_region_reference_locked(vm_shared_region_t shared_region);
static vm_shared_region_t vm_shared_region_create(
    void *root_dir,
    cpu_type_t cputype,
    boolean_t is_64bit);
static void vm_shared_region_destroy(vm_shared_region_t shared_region);

/*
 * Initialize the module...
 */
void
vm_shared_region_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> init\n"));

    vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region",
                                                  LCK_GRP_ATTR_NULL);
    lck_mtx_init(&vm_shared_region_lock,
                 vm_shared_region_lck_grp,
                 LCK_ATTR_NULL);

    queue_init(&vm_shared_region_queue);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: <- init\n"));
}

/*
 * Retrieve a task's shared region and grab an extra reference to
 * make sure it doesn't disappear while the caller is using it.
 * The caller is responsible for consuming that extra reference if
 * necessary.
 */
vm_shared_region_t
vm_shared_region_get(
    task_t task)
{
    vm_shared_region_t shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> get(%p)\n",
         task));

    task_lock(task);
    vm_shared_region_lock();
    shared_region = task->shared_region;
    if (shared_region) {
        assert(shared_region->sr_ref_count > 0);
        vm_shared_region_reference_locked(shared_region);
    }
    vm_shared_region_unlock();
    task_unlock(task);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: get(%p) <- %p\n",
         task, shared_region));

    return shared_region;
}

/*
 * Get the base address of the shared region.
 * That's the address at which it needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_offset_t
vm_shared_region_base_address(
    vm_shared_region_t shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> base_address(%p)\n",
         shared_region));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: base_address(%p) <- 0x%llx\n",
         shared_region, (long long)shared_region->sr_base_address));
    return shared_region->sr_base_address;
}

/*
 * Get the size of the shared region.
 * That's the size that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
mach_vm_size_t
vm_shared_region_size(
    vm_shared_region_t shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> size(%p)\n",
         shared_region));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: size(%p) <- 0x%llx\n",
         shared_region, (long long)shared_region->sr_size));
    return shared_region->sr_size;
}

/*
 * Get the memory entry of the shared region.
 * That's the "memory object" that needs to be mapped in the process's address
 * space.
 * No need to lock since this data is set when the shared region is
 * created and is never modified after that. The caller must hold an extra
 * reference on the shared region to prevent it from being destroyed.
 */
ipc_port_t
vm_shared_region_mem_entry(
    vm_shared_region_t shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> mem_entry(%p)\n",
         shared_region));
    assert(shared_region->sr_ref_count > 1);
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: mem_entry(%p) <- %p\n",
         shared_region, shared_region->sr_mem_entry));
    return shared_region->sr_mem_entry;
}

/*
 * Set the shared region the process should use.
 * A NULL new shared region means that we just want to release the old
 * shared region.
 * The caller should already have an extra reference on the new shared region
 * (if any). We release a reference on the old shared region (if any).
 */
void
vm_shared_region_set(
    task_t task,
    vm_shared_region_t new_shared_region)
{
    vm_shared_region_t old_shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> set(%p, %p)\n",
         task, new_shared_region));

    task_lock(task);
    vm_shared_region_lock();

    old_shared_region = task->shared_region;
    if (new_shared_region) {
        assert(new_shared_region->sr_ref_count > 0);
    }

    task->shared_region = new_shared_region;

    vm_shared_region_unlock();
    task_unlock(task);

    if (old_shared_region) {
        assert(old_shared_region->sr_ref_count > 0);
        vm_shared_region_deallocate(old_shared_region);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: set(%p) <- old=%p new=%p\n",
         task, old_shared_region, new_shared_region));
}
/*
 * Look up the shared region for the desired environment.
 * If none is found, create a new (empty) one.
 * Grab an extra reference on the returned shared region, to make sure
 * it doesn't get destroyed before the caller is done with it. The caller
 * is responsible for consuming that extra reference if necessary.
 */
vm_shared_region_t
vm_shared_region_lookup(
    void *root_dir,
    cpu_type_t cputype,
    boolean_t is_64bit)
{
    vm_shared_region_t shared_region;
    vm_shared_region_t new_shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n",
         root_dir, cputype, is_64bit));

    shared_region = NULL;
    new_shared_region = NULL;

    vm_shared_region_lock();
    for (;;) {
        queue_iterate(&vm_shared_region_queue,
                      shared_region,
                      vm_shared_region_t,
                      sr_q) {
            assert(shared_region->sr_ref_count > 0);
            if (shared_region->sr_cpu_type == cputype &&
                shared_region->sr_root_dir == root_dir &&
                shared_region->sr_64bit == is_64bit) {
                /* found a match! */
                vm_shared_region_reference_locked(shared_region);
                goto done;
            }
        }
        if (new_shared_region == NULL) {
            /* no match: create a new one */
            vm_shared_region_unlock();
            new_shared_region = vm_shared_region_create(root_dir,
                                                        cputype,
                                                        is_64bit);
            /* do the lookup again, in case we lost a race */
            vm_shared_region_lock();
            continue;
        }
        /* still no match: use our new one */
        shared_region = new_shared_region;
        new_shared_region = NULL;
        queue_enter(&vm_shared_region_queue,
                    shared_region,
                    vm_shared_region_t,
                    sr_q);
        break;
    }

done:
    vm_shared_region_unlock();

    if (new_shared_region) {
        /*
         * We lost a race with someone else to create a new shared
         * region for that environment. Get rid of our unused one.
         */
        assert(new_shared_region->sr_ref_count == 1);
        new_shared_region->sr_ref_count--;
        vm_shared_region_destroy(new_shared_region);
        new_shared_region = NULL;
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n",
         root_dir, cputype, is_64bit, shared_region));

    assert(shared_region->sr_ref_count > 0);
    return shared_region;
}

/*
 * Take an extra reference on a shared region.
 * The vm_shared_region_lock should already be held by the caller.
 */
static void
vm_shared_region_reference_locked(
    vm_shared_region_t shared_region)
{
#if DEBUG
    lck_mtx_assert(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED);
#endif

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> reference_locked(%p)\n",
         shared_region));
    assert(shared_region->sr_ref_count > 0);
    shared_region->sr_ref_count++;
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: reference_locked(%p) <- %d\n",
         shared_region, shared_region->sr_ref_count));
}

/*
 * Release a reference on the shared region.
 * Destroy it if there are no references left.
 */
void
vm_shared_region_deallocate(
    vm_shared_region_t shared_region)
{
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> deallocate(%p)\n",
         shared_region));

    vm_shared_region_lock();

    assert(shared_region->sr_ref_count > 0);

    if (shared_region->sr_root_dir == NULL) {
        /*
         * Local (i.e. based on the boot volume) shared regions
         * can persist or not based on the "shared_region_persistence"
         * sysctl.
         * Make sure that this one complies.
         */
        if (shared_region_persistence &&
            !shared_region->sr_persists) {
            /* make this one persistent */
            shared_region->sr_ref_count++;
            shared_region->sr_persists = TRUE;
        } else if (!shared_region_persistence &&
                   shared_region->sr_persists) {
            /* make this one no longer persistent */
            assert(shared_region->sr_ref_count > 1);
            shared_region->sr_ref_count--;
            shared_region->sr_persists = FALSE;
        }
    }

    assert(shared_region->sr_ref_count > 0);
    shared_region->sr_ref_count--;
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: deallocate(%p): ref now %d\n",
         shared_region, shared_region->sr_ref_count));

    if (shared_region->sr_ref_count == 0) {
        assert(!shared_region->sr_mapping_in_progress);
        /* remove it from the queue first, so no one can find it... */
        queue_remove(&vm_shared_region_queue,
                     shared_region,
                     vm_shared_region_t,
                     sr_q);
        vm_shared_region_unlock();
        /* ... and destroy it */
        vm_shared_region_destroy(shared_region);
        shared_region = NULL;
    } else {
        vm_shared_region_unlock();
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: deallocate(%p) <-\n",
         shared_region));
}

/*
 * Create a new (empty) shared region for a new environment.
 */
static vm_shared_region_t
vm_shared_region_create(
    void *root_dir,
    cpu_type_t cputype,
    boolean_t is_64bit)
{
    kern_return_t kr;
    vm_named_entry_t mem_entry;
    ipc_port_t mem_entry_port;
    vm_shared_region_t shared_region;
    vm_map_t sub_map;
    mach_vm_offset_t base_address, pmap_nesting_start;
    mach_vm_size_t size, pmap_nesting_size;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n",
         root_dir, cputype, is_64bit));

    base_address = 0;
    size = 0;
    mem_entry = NULL;
    mem_entry_port = IPC_PORT_NULL;
    sub_map = VM_MAP_NULL;

    /* create a new shared region structure... */
    shared_region = kalloc(sizeof(*shared_region));
    if (shared_region == NULL) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: couldn't allocate\n"));
        goto done;
    }

    /* figure out the correct settings for the desired environment */
    if (is_64bit) {
        switch (cputype) {
        case CPU_TYPE_I386:
            base_address = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64;
            break;
        case CPU_TYPE_POWERPC:
            base_address = SHARED_REGION_BASE_PPC64;
            size = SHARED_REGION_SIZE_PPC64;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64;
            break;
        default:
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: create: unknown cpu type %d\n",
                 cputype));
            kfree(shared_region, sizeof(*shared_region));
            shared_region = NULL;
            goto done;
        }
    } else {
        switch (cputype) {
        case CPU_TYPE_I386:
            base_address = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386;
            break;
        case CPU_TYPE_POWERPC:
            base_address = SHARED_REGION_BASE_PPC;
            size = SHARED_REGION_SIZE_PPC;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC;
            break;
#ifdef CPU_TYPE_ARM
        case CPU_TYPE_ARM:
            base_address = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM;
            pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM;
            break;
#endif /* CPU_TYPE_ARM */
        default:
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: create: unknown cpu type %d\n",
                 cputype));
            kfree(shared_region, sizeof(*shared_region));
            shared_region = NULL;
            goto done;
        }
    }

    /* create a memory entry structure and a Mach port handle */
    kr = mach_memory_entry_allocate(&mem_entry,
                                    &mem_entry_port);
    if (kr != KERN_SUCCESS) {
        kfree(shared_region, sizeof(*shared_region));
        shared_region = NULL;
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: "
             "couldn't allocate mem_entry\n"));
        goto done;
    }

    /* create a VM sub map and its pmap */
    sub_map = vm_map_create(pmap_create(0, is_64bit),
                            0, size,
                            TRUE);
    if (sub_map == VM_MAP_NULL) {
        ipc_port_release_send(mem_entry_port);
        kfree(shared_region, sizeof(*shared_region));
        shared_region = NULL;
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: create: "
             "couldn't allocate map\n"));
        goto done;
    }

    /* make the memory entry point to the VM sub map */
    mem_entry->is_sub_map = TRUE;
    mem_entry->backing.map = sub_map;
    mem_entry->size = size;
    mem_entry->protection = VM_PROT_ALL;

    /* make the shared region point at the memory entry */
    shared_region->sr_mem_entry = mem_entry_port;

    /* fill in the shared region's environment and settings */
    shared_region->sr_base_address = base_address;
    shared_region->sr_size = size;
    shared_region->sr_pmap_nesting_start = pmap_nesting_start;
    shared_region->sr_pmap_nesting_size = pmap_nesting_size;
    shared_region->sr_cpu_type = cputype;
    shared_region->sr_64bit = is_64bit;
    shared_region->sr_root_dir = root_dir;

    queue_init(&shared_region->sr_q);
    shared_region->sr_mapping_in_progress = FALSE;
    shared_region->sr_persists = FALSE;
    shared_region->sr_first_mapping = (mach_vm_offset_t) -1;

    /* grab a reference for the caller */
    shared_region->sr_ref_count = 1;

done:
    if (shared_region) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: create(root=%p,cpu=%d,64bit=%d,"
             "base=0x%llx,size=0x%llx) <- "
             "%p mem=(%p,%p) map=%p pmap=%p\n",
             root_dir, cputype, is_64bit, (long long)base_address,
             (long long)size, shared_region,
             mem_entry_port, mem_entry, sub_map, sub_map->pmap));
    } else {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: create(root=%p,cpu=%d,64bit=%d,"
             "base=0x%llx,size=0x%llx) <- NULL\n",
             root_dir, cputype, is_64bit, (long long)base_address,
             (long long)size));
    }
    return shared_region;
}

/*
 * Destroy a now-unused shared region.
 * The shared region is no longer in the queue and can not be looked up.
 */
static void
vm_shared_region_destroy(
    vm_shared_region_t shared_region)
{
    vm_named_entry_t mem_entry;
    vm_map_t map;

    SHARED_REGION_TRACE_INFO(
        ("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n",
         shared_region,
         shared_region->sr_root_dir,
         shared_region->sr_cpu_type,
         shared_region->sr_64bit));

    assert(shared_region->sr_ref_count == 0);
    assert(!shared_region->sr_persists);

    mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject;
    assert(mem_entry->is_sub_map);
    assert(!mem_entry->internal);
    assert(!mem_entry->is_pager);
    map = mem_entry->backing.map;

    /*
     * Clean up the pmap first. The virtual addresses that were
     * entered in this possibly "nested" pmap may have different values
     * than the VM map's min and max offsets, if the VM sub map was
     * mapped at a non-zero offset in the processes' main VM maps, which
     * is usually the case, so the clean-up we do in vm_map_destroy() would
     * not be enough.
     */
    if (map->pmap) {
        pmap_remove(map->pmap,
                    shared_region->sr_base_address,
                    (shared_region->sr_base_address +
                     shared_region->sr_size));
    }

    /*
     * Release our (one and only) handle on the memory entry.
     * This will generate a no-senders notification, which will be processed
     * by ipc_kobject_notify(), which will release the one and only
     * reference on the memory entry and cause it to be destroyed, along
     * with the VM sub map and its pmap.
     */
    mach_memory_entry_port_release(shared_region->sr_mem_entry);
    mem_entry = NULL;
    shared_region->sr_mem_entry = IPC_PORT_NULL;

    /* release the shared region structure... */
    kfree(shared_region, sizeof(*shared_region));
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: destroy(%p) <-\n",
         shared_region));
    shared_region = NULL;
}

/*
 * Gets the address of the first (in time) mapping in the shared region.
 */
kern_return_t
vm_shared_region_start_address(
    vm_shared_region_t shared_region,
    mach_vm_offset_t *start_address)
{
    kern_return_t kr;
    mach_vm_offset_t sr_base_address;
    mach_vm_offset_t sr_first_mapping;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> start_address(%p)\n",
         shared_region));
    assert(shared_region->sr_ref_count > 1);

    vm_shared_region_lock();

    /*
     * Wait if there's another thread establishing a mapping
     * in this shared region right when we're looking at it.
     * We want a consistent view of the map...
     */
    while (shared_region->sr_mapping_in_progress) {
        /* wait for our turn... */
        assert(shared_region->sr_ref_count > 1);
        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
                               THREAD_UNINT);
    }
    assert(!shared_region->sr_mapping_in_progress);
    assert(shared_region->sr_ref_count > 1);

    sr_base_address = shared_region->sr_base_address;
    sr_first_mapping = shared_region->sr_first_mapping;

    if (sr_first_mapping == (mach_vm_offset_t) -1) {
        /* shared region is empty */
        kr = KERN_INVALID_ADDRESS;
    } else {
        kr = KERN_SUCCESS;
        *start_address = sr_base_address + sr_first_mapping;
    }

    vm_shared_region_unlock();

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: start_address(%p) <- 0x%llx\n",
         shared_region, (long long)shared_region->sr_base_address));

    return kr;
}

/*
 * Establish some mappings of a file in the shared region.
 * This is used by "dyld" via the shared_region_map_np() system call
 * to populate the shared region with the appropriate shared cache.
 *
 * One could also call it several times to incrementally load several
 * libraries, as long as they do not overlap.
 * It will return KERN_SUCCESS if the mappings were successfully established
 * or if they were already established identically by another process.
 */
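
/*
 * For illustration only (not original to this file): one plausible shape
 * of the "mappings" argument.  The values are made up; real ones come from
 * the dyld_shared_cache header.  A read-only text segment followed by a
 * zero-filled writable segment could be described as:
 *
 *    struct shared_file_mapping_np mappings[2] = {
 *        { .sfm_address     = sr_base + 0x0,
 *          .sfm_size        = 0x200000,
 *          .sfm_file_offset = 0x0,
 *          .sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE,
 *          .sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE },
 *        { .sfm_address     = sr_base + 0x200000,
 *          .sfm_size        = 0x1000,
 *          .sfm_file_offset = 0x0,
 *          .sfm_max_prot    = VM_PROT_READ | VM_PROT_WRITE,
 *          .sfm_init_prot   = VM_PROT_READ | VM_PROT_WRITE |
 *                             VM_PROT_ZF } };
 *
 * The VM_PROT_ZF bit in sfm_init_prot is what makes the code below take
 * the anonymous, zero-filled path instead of the file-backed path.
 */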
kern_return_t
vm_shared_region_map_file(
    vm_shared_region_t shared_region,
    unsigned int mappings_count,
    struct shared_file_mapping_np *mappings,
    memory_object_control_t file_control,
    memory_object_size_t file_size,
    void *root_dir)
{
    kern_return_t kr;
    vm_object_t file_object;
    ipc_port_t sr_handle;
    vm_named_entry_t sr_mem_entry;
    vm_map_t sr_map;
    mach_vm_offset_t sr_base_address;
    unsigned int i;
    mach_port_t map_port;
    mach_vm_offset_t target_address;
    vm_object_t object;
    vm_object_size_t obj_size;

    kr = KERN_SUCCESS;

    vm_shared_region_lock();
    assert(shared_region->sr_ref_count > 1);

    if (shared_region->sr_root_dir != root_dir) {
        /*
         * This shared region doesn't match the current root
         * directory of this process. Deny the mapping to
         * avoid tainting the shared region with something that
         * doesn't quite belong into it.
         */
        vm_shared_region_unlock();
        kr = KERN_PROTECTION_FAILURE;
        goto done;
    }

    /*
     * Make sure we handle only one mapping at a time in a given
     * shared region, to avoid race conditions. This should not
     * happen frequently...
     */
    while (shared_region->sr_mapping_in_progress) {
        /* wait for our turn... */
        vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
                               THREAD_UNINT);
    }
    assert(!shared_region->sr_mapping_in_progress);
    assert(shared_region->sr_ref_count > 1);
    /* let others know we're working in this shared region */
    shared_region->sr_mapping_in_progress = TRUE;

    vm_shared_region_unlock();

    /* no need to lock because this data is never modified... */
    sr_handle = shared_region->sr_mem_entry;
    sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject;
    sr_map = sr_mem_entry->backing.map;
    sr_base_address = shared_region->sr_base_address;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n",
         shared_region, mappings_count, mappings,
         file_control, file_size));

    /* get the VM object associated with the file to be mapped */
    file_object = memory_object_control_to_vm_object(file_control);

    /* establish the mappings */
    for (i = 0; i < mappings_count; i++) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: mapping[%d]: "
             "address:0x%016llx size:0x%016llx offset:0x%016llx "
             "maxprot:0x%x prot:0x%x\n",
             i,
             (long long)mappings[i].sfm_address,
             (long long)mappings[i].sfm_size,
             (long long)mappings[i].sfm_file_offset,
             mappings[i].sfm_max_prot,
             mappings[i].sfm_init_prot));

        if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
            /* zero-filled memory */
            map_port = MACH_PORT_NULL;
        } else {
            /* file-backed memory */
            map_port = (ipc_port_t) file_object->pager;
        }

        /* mapping's address is relative to the shared region base */
        target_address =
            mappings[i].sfm_address - sr_base_address;

        /* establish that mapping, OK if it's "already" there */
        if (map_port == MACH_PORT_NULL) {
            /*
             * We want to map some anonymous memory in a
             * shared region.
             * We have to create the VM object now, so that it
             * can be mapped "copy-on-write".
             */
            obj_size = vm_map_round_page(mappings[i].sfm_size);
            object = vm_object_allocate(obj_size);
            if (object == VM_OBJECT_NULL) {
                kr = KERN_RESOURCE_SHORTAGE;
            } else {
                kr = vm_map_enter(
                    sr_map,
                    &target_address,
                    vm_map_round_page(mappings[i].sfm_size),
                    0,
                    VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
                    object,
                    0,
                    TRUE,
                    mappings[i].sfm_init_prot & VM_PROT_ALL,
                    mappings[i].sfm_max_prot & VM_PROT_ALL,
                    VM_INHERIT_DEFAULT);
            }
        } else {
            object = VM_OBJECT_NULL; /* no anonymous memory here */
            kr = vm_map_enter_mem_object(
                sr_map,
                &target_address,
                vm_map_round_page(mappings[i].sfm_size),
                0,
                VM_FLAGS_FIXED | VM_FLAGS_ALREADY,
                map_port,
                mappings[i].sfm_file_offset,
                TRUE,
                mappings[i].sfm_init_prot & VM_PROT_ALL,
                mappings[i].sfm_max_prot & VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
        }

        if (kr != KERN_SUCCESS) {
            if (map_port == MACH_PORT_NULL) {
                /*
                 * Get rid of the VM object we just created
                 * but failed to map.
                 */
                vm_object_deallocate(object);
                object = VM_OBJECT_NULL;
            }
            if (kr == KERN_MEMORY_PRESENT) {
                /*
                 * This exact mapping was already there:
                 * that's fine.
                 */
                SHARED_REGION_TRACE_INFO(
                    ("shared_region: mapping[%d]: "
                     "address:0x%016llx size:0x%016llx "
                     "offset:0x%016llx "
                     "maxprot:0x%x prot:0x%x "
                     "already mapped...\n",
                     i,
                     (long long)mappings[i].sfm_address,
                     (long long)mappings[i].sfm_size,
                     (long long)mappings[i].sfm_file_offset,
                     mappings[i].sfm_max_prot,
                     mappings[i].sfm_init_prot));
                /*
                 * We didn't establish this mapping ourselves;
                 * let's reset its size, so that we do not
                 * attempt to undo it if an error occurs later.
                 */
                mappings[i].sfm_size = 0;
                kr = KERN_SUCCESS;
            } else {
                unsigned int j;

                /* this mapping failed! */
                SHARED_REGION_TRACE_ERROR(
                    ("shared_region: mapping[%d]: "
                     "address:0x%016llx size:0x%016llx "
                     "offset:0x%016llx "
                     "maxprot:0x%x prot:0x%x failed 0x%x\n",
                     i,
                     (long long)mappings[i].sfm_address,
                     (long long)mappings[i].sfm_size,
                     (long long)mappings[i].sfm_file_offset,
                     mappings[i].sfm_max_prot,
                     mappings[i].sfm_init_prot,
                     kr));

                /*
                 * Undo the mappings we've established so far.
                 */
                for (j = 0; j < i; j++) {
                    kern_return_t kr2;

                    if (mappings[j].sfm_size == 0) {
                        /*
                         * We didn't establish this
                         * mapping, so nothing to undo.
                         */
                        continue;
                    }
                    SHARED_REGION_TRACE_INFO(
                        ("shared_region: mapping[%d]: "
                         "address:0x%016llx "
                         "size:0x%016llx "
                         "offset:0x%016llx "
                         "maxprot:0x%x prot:0x%x: "
                         "undoing...\n",
                         j,
                         (long long)mappings[j].sfm_address,
                         (long long)mappings[j].sfm_size,
                         (long long)mappings[j].sfm_file_offset,
                         mappings[j].sfm_max_prot,
                         mappings[j].sfm_init_prot));
                    kr2 = mach_vm_deallocate(
                        sr_map,
                        (mappings[j].sfm_address -
                         sr_base_address),
                        mappings[j].sfm_size);
                    assert(kr2 == KERN_SUCCESS);
                }

                break;
            }
        }

        /*
         * Record the first (chronologically) mapping in
         * this shared region.
         * We're protected by "sr_mapping_in_progress" here,
         * so no need to lock "shared_region".
         */
        if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
            shared_region->sr_first_mapping = target_address;
        }
    }

    vm_shared_region_lock();
    assert(shared_region->sr_ref_count > 1);
    assert(shared_region->sr_mapping_in_progress);
    /* we're done working on that shared region */
    shared_region->sr_mapping_in_progress = FALSE;
    thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
    vm_shared_region_unlock();

done:
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x\n",
         shared_region, mappings_count, mappings,
         file_control, file_size, kr));
    return kr;
}

/*
 * Enter the appropriate shared region into "map" for "task".
 * This involves looking up the shared region (and possibly creating a new
 * one) for the desired environment, then mapping the VM sub map into the
 * task's VM "map", with the appropriate level of pmap-nesting.
 */
kern_return_t
vm_shared_region_enter(
    struct _vm_map *map,
    struct task *task,
    void *fsroot,
    cpu_type_t cpu)
{
    kern_return_t kr;
    vm_shared_region_t shared_region;
    vm_map_offset_t sr_address, sr_offset, target_address;
    vm_map_size_t sr_size, mapping_size;
    vm_map_offset_t sr_pmap_nesting_start;
    vm_map_size_t sr_pmap_nesting_size;
    ipc_port_t sr_handle;
    boolean_t is_64bit;

    is_64bit = task_has_64BitAddr(task);

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: -> "
         "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n",
         map, task, fsroot, cpu, is_64bit));

    /* lookup (create if needed) the shared region for this environment */
    shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit);
    if (shared_region == NULL) {
        /* this should not happen! */
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: -> "
             "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): "
             "lookup failed!\n",
             map, task, fsroot, cpu, is_64bit));
        //panic("shared_region_enter: lookup failed\n");
        return KERN_FAILURE;
    }

    /* let the task use that shared region */
    vm_shared_region_set(task, shared_region);

    kr = KERN_SUCCESS;
    /* no need to lock since this data is never modified */
    sr_address = shared_region->sr_base_address;
    sr_size = shared_region->sr_size;
    sr_handle = shared_region->sr_mem_entry;
    sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start;
    sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size;

    /*
     * Start mapping the shared region's VM sub map into the task's VM map.
     */
    sr_offset = 0;

    if (sr_pmap_nesting_start > sr_address) {
        /* we need to map a range without pmap-nesting first */
        target_address = sr_address;
        mapping_size = sr_pmap_nesting_start - sr_address;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            sr_handle,
            sr_offset,
            TRUE,
            VM_PROT_READ,
            VM_PROT_ALL,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d): "
                 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                 map, task, fsroot, cpu, is_64bit,
                 (long long)target_address,
                 (long long)mapping_size, sr_handle, kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d): "
             "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
             map, task, fsroot, cpu, is_64bit,
             (long long)target_address, (long long)mapping_size,
             sr_handle, kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    /*
     * We may need to map several pmap-nested portions, due to platform
     * specific restrictions on pmap nesting.
     * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias...
     */
    for (;
         sr_pmap_nesting_size > 0;
         sr_offset += mapping_size,
         sr_size -= mapping_size,
         sr_pmap_nesting_size -= mapping_size) {
        target_address = sr_address + sr_offset;
        mapping_size = sr_pmap_nesting_size;
        if (mapping_size > pmap_nesting_size_max) {
            mapping_size = (vm_map_offset_t) pmap_nesting_size_max;
        }
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            (VM_FLAGS_FIXED | VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP)),
            sr_handle,
            sr_offset,
            TRUE,
            VM_PROT_READ,
            VM_PROT_ALL,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d): "
                 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                 map, task, fsroot, cpu, is_64bit,
                 (long long)target_address,
                 (long long)mapping_size, sr_handle, kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d): "
             "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
             map, task, fsroot, cpu, is_64bit,
             (long long)target_address, (long long)mapping_size,
             sr_handle, kr));
    }
    if (sr_size > 0) {
        /* and there's some left to be mapped without pmap-nesting */
        target_address = sr_address + sr_offset;
        mapping_size = sr_size;
        kr = vm_map_enter_mem_object(
            map,
            &target_address,
            mapping_size,
            0,
            VM_FLAGS_FIXED,
            sr_handle,
            sr_offset,
            TRUE,
            VM_PROT_READ,
            VM_PROT_ALL,
            VM_INHERIT_SHARE);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: enter(%p,%p,%p,%d,%d): "
                 "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
                 map, task, fsroot, cpu, is_64bit,
                 (long long)target_address,
                 (long long)mapping_size, sr_handle, kr));
            goto done;
        }
        SHARED_REGION_TRACE_DEBUG(
            ("shared_region: enter(%p,%p,%p,%d,%d): "
             "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n",
             map, task, fsroot, cpu, is_64bit,
             (long long)target_address, (long long)mapping_size,
             sr_handle, kr));
        sr_offset += mapping_size;
        sr_size -= mapping_size;
    }
    assert(sr_size == 0);

done:
    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n",
         map, task, fsroot, cpu, is_64bit, kr));
    return kr;
}
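
/*
 * Illustrative note (not original to this file): for an environment whose
 * pmap-nesting range does not cover the whole region, the code above ends
 * up with three consecutive mappings of the same submap in the task's map:
 *
 *    [sr_address, nesting_start)           mapped without pmap-nesting
 *    [nesting_start, nesting_start + n)    tagged VM_MEMORY_SHARED_PMAP,
 *                                          possibly split into several
 *                                          pmap_nesting_size_max chunks
 *    [..., sr_address + sr_size)           unnested remainder
 *
 * Only the ranges tagged with the VM_MEMORY_SHARED_PMAP alias are eligible
 * for pmap nesting.
 */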

/******************************************************************************/
/* Comm page support                                                          */
/******************************************************************************/

ipc_port_t commpage32_handle = IPC_PORT_NULL;
ipc_port_t commpage64_handle = IPC_PORT_NULL;
vm_named_entry_t commpage32_entry = NULL;
vm_named_entry_t commpage64_entry = NULL;
vm_map_t commpage32_map = VM_MAP_NULL;
vm_map_t commpage64_map = VM_MAP_NULL;

/*
 * Create a memory entry, VM submap and pmap for one commpage.
 */
static void
_vm_commpage_init(
    ipc_port_t *handlep,
    vm_map_size_t size)
{
    kern_return_t kr;
    vm_named_entry_t mem_entry;
    vm_map_t new_map;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> _init(0x%llx)\n",
         (long long)size));

    kr = mach_memory_entry_allocate(&mem_entry,
                                    handlep);
    if (kr != KERN_SUCCESS) {
        panic("_vm_commpage_init: could not allocate mem_entry");
    }
    new_map = vm_map_create(pmap_create(0, FALSE), 0, size, TRUE);
    if (new_map == VM_MAP_NULL) {
        panic("_vm_commpage_init: could not allocate VM map");
    }
    mem_entry->backing.map = new_map;
    mem_entry->internal = TRUE;
    mem_entry->is_sub_map = TRUE;
    mem_entry->offset = 0;
    mem_entry->protection = VM_PROT_ALL;
    mem_entry->size = size;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: _init(0x%llx) <- %p\n",
         (long long)size, *handlep));
}

/*
 * Initialize the comm pages at boot time.
 */
void
vm_commpage_init(void)
{
    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> init()\n"));

    /* create the 32-bit comm page */
    _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH);
    commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject;
    commpage32_map = commpage32_entry->backing.map;

    /* XXX if (cpu_is_64bit_capable()) ? */
    /* create the 64-bit comm page */
    _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH);
    commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject;
    commpage64_map = commpage64_entry->backing.map;

    /* populate them according to this specific platform */
    commpage_populate();

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: init() <-\n"));
}

/*
 * Enter the appropriate comm page into the task's address space.
 * This is called at exec() time via vm_map_exec().
 */
kern_return_t
vm_commpage_enter(
    vm_map_t map,
    task_t task)
{
    ipc_port_t commpage_handle;
    vm_map_offset_t commpage_address, objc_address;
    vm_map_size_t commpage_size, objc_size;
    int vm_flags;
    kern_return_t kr;

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: -> enter(%p,%p)\n",
         map, task));

    /* the comm page is likely to be beyond the actual end of the VM map */
    vm_flags = VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX;

    /* select the appropriate comm page for this task */
    assert(!(task_has_64BitAddr(task) ^ vm_map_is_64bit(map)));
    if (task_has_64BitAddr(task)) {
#ifdef __ppc__
        /*
         * PPC51: ppc64 is limited to 51-bit addresses.
         * Memory above that limit is handled specially at the
         * pmap level, so do not interfere.
         */
        vm_flags |= VM_FLAGS_NO_PMAP_CHECK;
#endif /* __ppc__ */
        commpage_handle = commpage64_handle;
        commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS;
        commpage_size = _COMM_PAGE64_AREA_LENGTH;
        objc_size = _COMM_PAGE64_OBJC_SIZE;
        objc_address = _COMM_PAGE64_OBJC_BASE;
    } else {
        commpage_handle = commpage32_handle;
        commpage_address =
            (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS;
        commpage_size = _COMM_PAGE32_AREA_LENGTH;
        objc_size = _COMM_PAGE32_OBJC_SIZE;
        objc_address = _COMM_PAGE32_OBJC_BASE;
    }

    if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 &&
        (commpage_size & (pmap_nesting_size_min - 1)) == 0) {
        /* the commpage is properly aligned and sized for pmap-nesting */
        vm_flags |= VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP);
    }

    /* map the comm page in the task's address space */
    assert(commpage_handle != IPC_PORT_NULL);
    kr = vm_map_enter_mem_object(
        map,
        &commpage_address,
        commpage_size,
        0,
        vm_flags,
        commpage_handle,
        0,
        FALSE,
        VM_PROT_READ|VM_PROT_EXECUTE,
        VM_PROT_READ|VM_PROT_EXECUTE,
        VM_INHERIT_SHARE);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("commpage: enter(%p,0x%llx,0x%llx) "
             "commpage %p mapping failed 0x%x\n",
             map, (long long)commpage_address,
             (long long)commpage_size, commpage_handle, kr));
    }

    /*
     * Since we're here, we also pre-allocate some virtual space for the
     * Objective-C run-time, if needed...
     */
    if (objc_size != 0) {
        kr = vm_map_enter_mem_object(
            map,
            &objc_address,
            objc_size,
            0,
            VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX,
            IPC_PORT_NULL,
            0,
            FALSE,
            VM_PROT_ALL,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS) {
            SHARED_REGION_TRACE_ERROR(
                ("commpage: enter(%p,0x%llx,0x%llx) "
                 "objc mapping failed 0x%x\n",
                 map, (long long)objc_address,
                 (long long)objc_size, kr));
        }
    }

    SHARED_REGION_TRACE_DEBUG(
        ("commpage: enter(%p,%p) <- 0x%x\n",
         map, task, kr));
    return kr;
}