2d21ac55 A |
1 | /* |
2 | * Copyright (c) 2007 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. Please obtain a copy of the License at | |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
11 | * file. | |
12 | * | |
13 | * The Original Code and all software distributed under the License are | |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
18 | * Please see the License for the specific language governing rights and | |
19 | * limitations under the License. | |
20 | * | |
21 | * @APPLE_LICENSE_HEADER_END@ | |
22 | */ | |
23 | ||
24 | /* | |
25 | * Shared region (... and comm page) | |
26 | * | |
27 | * This file handles the VM shared region and comm page. | |
28 | * | |
29 | */ | |
30 | /* | |
31 | * SHARED REGIONS | |
32 | * -------------- | |
33 | * | |
34 | * A shared region is a submap that contains the most common system shared | |
35 | * libraries for a given environment. | |
36 | * An environment is defined by (cpu-type, 64-bitness, root directory). | |
37 | * | |
38 | * The point of a shared region is to reduce the setup overhead when exec'ing | |
39 | * a new process. | |
40 | * A shared region uses a shared VM submap that gets mapped automatically | |
41 | * at exec() time (see vm_map_exec()). The first process of a given | |
42 | * environment sets up the shared region and all further processes in that | |
43 | * environment can re-use that shared region without having to re-create | |
44 | * the same mappings in their VM map. All they need is contained in the shared | |
45 | * region. | |
46 | * It can also share a pmap (mostly for read-only parts but also for the | |
47 | * initial version of some writable parts), which gets "nested" into the | |
48 | * process's pmap. This reduces the number of soft faults: once one process | |
49 | * brings in a page in the shared region, all the other processes can access | |
50 | * it without having to enter it in their own pmap. | |
51 | * | |
52 | * | |
53 | * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter() | |
54 | * to map the appropriate shared region in the process's address space. | |
55 | * We look up the appropriate shared region for the process's environment. | |
56 | * If we can't find one, we create a new (empty) one and add it to the list. | |
57 | * Otherwise, we just take an extra reference on the shared region we found. | |
58 | * | |
59 | * The "dyld" runtime (mapped into the process's address space at exec() time) | |
60 | * will then use the shared_region_check_np() and shared_region_map_np() | |
61 | * system calls to validate and/or populate the shared region with the | |
62 | * appropriate dyld_shared_cache file. | |
63 | * | |
64 | * The shared region is inherited on fork() and the child simply takes an | |
65 | * extra reference on its parent's shared region. | |
66 | * | |
67 | * When the task terminates, we release a reference on its shared region. | |
68 | * When the last reference is released, we destroy the shared region. | |
69 | * | |
70 | * After a chroot(), the calling process keeps using its original shared region, | |
71 | * since that's what was mapped when it was started. But its children | |
72 | * will use a different shared region, because they need to use the shared | |
73 | * cache that's relative to the new root directory. | |
74 | */ | |
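/*
 * Illustrative sketch only (hypothetical names, not part of xnu): the
 * "environment" described above boils down to three fields, and
 * vm_shared_region_lookup() below compares exactly these three fields
 * against each region queued on vm_shared_region_queue.
 */
struct sr_environment {
	cpu_type_t	env_cpu_type;	/* e.g. CPU_TYPE_I386 */
	boolean_t	env_is_64bit;	/* 64-bit address space ? */
	void		*env_root_dir;	/* NULL for the boot volume, or the chroot'ed root */
};

static boolean_t
sr_environment_matches(
	struct sr_environment	*env,
	vm_shared_region_t	shared_region)
{
	return (shared_region->sr_cpu_type == env->env_cpu_type &&
		shared_region->sr_64bit == env->env_is_64bit &&
		shared_region->sr_root_dir == env->env_root_dir);
}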
75 | /* | |
76 | * COMM PAGE | |
77 | * | |
78 | * A "comm page" is an area of memory that is populated by the kernel with | |
79 | * the appropriate platform-specific version of some commonly used code. | |
80 | * There is one "comm page" per platform (cpu-type, 64-bitness) but only | |
81 | * for the native cpu-type. No need to overly optimize translated code | |
82 | * for hardware that is not really there ! | |
83 | * | |
84 | * The comm pages are created and populated at boot time. | |
85 | * | |
86 | * The appropriate comm page is mapped into a process's address space | |
87 | * at exec() time, in vm_map_exec(). | |
88 | * It is then inherited on fork(). | |
89 | * | |
90 | * The comm page is shared between the kernel and all applications of | |
91 | * a given platform. Only the kernel can modify it. | |
92 | * | |
93 | * Applications just branch to fixed addresses in the comm page and find | |
94 | * the right version of the code for the platform. There is also some | |
95 | * data provided and updated by the kernel for processes to retrieve easily | |
96 | * without having to do a system call. | |
97 | */ | |
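/*
 * Illustrative user-space sketch only (not part of this file).  It assumes
 * the comm page layout macros from the platform's cpu_capabilities.h
 * (e.g. _COMM_PAGE_CPU_CAPABILITIES); the exact symbols, offsets and header
 * path are platform- and release-specific.
 */
#include <stdint.h>
#include <machine/cpu_capabilities.h>

static uint32_t
read_cpu_capabilities(void)
{
	/* the comm page is mapped at the same fixed address in every process */
	return *(volatile uint32_t *)_COMM_PAGE_CPU_CAPABILITIES;
}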
98 | ||
99 | #include <debug.h> | |
100 | ||
101 | #include <kern/ipc_tt.h> | |
102 | #include <kern/kalloc.h> | |
b0d623f7 | 103 | #include <kern/thread_call.h> |
2d21ac55 | 104 | |
4a3eedf9 A |
105 | #include <mach/mach_vm.h> |
106 | ||
2d21ac55 A |
107 | #include <vm/vm_map.h> |
108 | #include <vm/vm_shared_region.h> | |
109 | ||
110 | #include <vm/vm_protos.h> | |
111 | ||
112 | #include <machine/commpage.h> | |
113 | #include <machine/cpu_capabilities.h> | |
114 | ||
115 | /* "dyld" uses this to figure out what the kernel supports */ | |
116 | int shared_region_version = 3; | |
117 | ||
2d21ac55 A |
118 | /* trace level, output is sent to the system log file */ |
119 | int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL; | |
120 | ||
b0d623f7 A |
121 | /* should local (non-chroot) shared regions persist when no task uses them ? */ |
122 | int shared_region_persistence = 0; /* no by default */ | |
123 | ||
124 | /* delay before reclaiming an unused shared region */ | |
125 | int shared_region_destroy_delay = 120; /* in seconds */ | |
126 | ||
6d2010ae A |
127 | /* indicates whether a shared region has been slid; only one region can be slid */ | |
128 | boolean_t shared_region_completed_slide = FALSE; | |
129 | ||
2d21ac55 A |
130 | /* this lock protects all the shared region data structures */ |
131 | lck_grp_t *vm_shared_region_lck_grp; | |
132 | lck_mtx_t vm_shared_region_lock; | |
133 | ||
134 | #define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock) | |
135 | #define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock) | |
136 | #define vm_shared_region_sleep(event, interruptible) \ | |
137 | lck_mtx_sleep(&vm_shared_region_lock, \ | |
138 | LCK_SLEEP_DEFAULT, \ | |
139 | (event_t) (event), \ | |
140 | (interruptible)) | |
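/*
 * Note: with LCK_SLEEP_DEFAULT, lck_mtx_sleep() drops vm_shared_region_lock
 * while the thread is blocked and re-acquires it before returning, which is
 * why callers re-test their condition in a while loop around
 * vm_shared_region_sleep().
 */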
141 | ||
142 | /* the list of currently available shared regions (one per environment) */ | |
143 | queue_head_t vm_shared_region_queue; | |
144 | ||
145 | static void vm_shared_region_reference_locked(vm_shared_region_t shared_region); | |
146 | static vm_shared_region_t vm_shared_region_create( | |
147 | void *root_dir, | |
148 | cpu_type_t cputype, | |
149 | boolean_t is_64bit); | |
150 | static void vm_shared_region_destroy(vm_shared_region_t shared_region); | |
151 | ||
b0d623f7 A |
152 | static void vm_shared_region_timeout(thread_call_param_t param0, |
153 | thread_call_param_t param1); | |
154 | ||
155 | static int __commpage_setup = 0; | |
156 | #if defined(__i386__) || defined(__x86_64__) | |
157 | static int __system_power_source = 1; /* init to external power source */ | |
158 | static void post_sys_powersource_internal(int i, int internal); | |
159 | #endif /* __i386__ || __x86_64__ */ | |
160 | ||
161 | ||
2d21ac55 A |
162 | /* |
163 | * Initialize the module... | |
164 | */ | |
165 | void | |
166 | vm_shared_region_init(void) | |
167 | { | |
168 | SHARED_REGION_TRACE_DEBUG( | |
169 | ("shared_region: -> init\n")); | |
170 | ||
171 | vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region", | |
172 | LCK_GRP_ATTR_NULL); | |
173 | lck_mtx_init(&vm_shared_region_lock, | |
174 | vm_shared_region_lck_grp, | |
175 | LCK_ATTR_NULL); | |
176 | ||
177 | queue_init(&vm_shared_region_queue); | |
178 | ||
179 | SHARED_REGION_TRACE_DEBUG( | |
180 | ("shared_region: <- init\n")); | |
181 | } | |
182 | ||
183 | /* | |
184 | * Retrieve a task's shared region and grab an extra reference to | |
185 | * make sure it doesn't disappear while the caller is using it. | |
186 | * The caller is responsible for consuming that extra reference if | |
187 | * necessary. | |
188 | */ | |
189 | vm_shared_region_t | |
190 | vm_shared_region_get( | |
191 | task_t task) | |
192 | { | |
193 | vm_shared_region_t shared_region; | |
194 | ||
195 | SHARED_REGION_TRACE_DEBUG( | |
196 | ("shared_region: -> get(%p)\n", | |
197 | task)); | |
198 | ||
199 | task_lock(task); | |
200 | vm_shared_region_lock(); | |
201 | shared_region = task->shared_region; | |
202 | if (shared_region) { | |
203 | assert(shared_region->sr_ref_count > 0); | |
204 | vm_shared_region_reference_locked(shared_region); | |
205 | } | |
206 | vm_shared_region_unlock(); | |
207 | task_unlock(task); | |
208 | ||
209 | SHARED_REGION_TRACE_DEBUG( | |
210 | ("shared_region: get(%p) <- %p\n", | |
211 | task, shared_region)); | |
212 | ||
213 | return shared_region; | |
214 | } | |
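/*
 * Hypothetical caller (not in xnu) illustrating the reference discipline
 * described above: the extra reference taken by vm_shared_region_get()
 * must be consumed with vm_shared_region_deallocate() when done.
 */
static mach_vm_offset_t
example_task_shared_region_base(
	task_t		task)
{
	vm_shared_region_t	shared_region;
	mach_vm_offset_t	base;

	shared_region = vm_shared_region_get(task);
	if (shared_region == NULL) {
		return 0;	/* task has no shared region */
	}
	base = vm_shared_region_base_address(shared_region);
	/* consume the extra reference taken by vm_shared_region_get() */
	vm_shared_region_deallocate(shared_region);
	return base;
}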
215 | ||
216 | /* | |
217 | * Get the base address of the shared region. | |
218 | * That's the address at which it needs to be mapped in the process's address | |
219 | * space. | |
220 | * No need to lock since this data is set when the shared region is | |
221 | * created and is never modified after that. The caller must hold an extra | |
222 | * reference on the shared region to prevent it from being destroyed. | |
223 | */ | |
224 | mach_vm_offset_t | |
225 | vm_shared_region_base_address( | |
226 | vm_shared_region_t shared_region) | |
227 | { | |
228 | SHARED_REGION_TRACE_DEBUG( | |
229 | ("shared_region: -> base_address(%p)\n", | |
230 | shared_region)); | |
231 | assert(shared_region->sr_ref_count > 1); | |
232 | SHARED_REGION_TRACE_DEBUG( | |
233 | ("shared_region: base_address(%p) <- 0x%llx\n", | |
234 | shared_region, (long long)shared_region->sr_base_address)); | |
235 | return shared_region->sr_base_address; | |
236 | } | |
237 | ||
238 | /* | |
239 | * Get the size of the shared region. | |
240 | * That's the size that needs to be mapped in the process's address | |
241 | * space. | |
242 | * No need to lock since this data is set when the shared region is | |
243 | * created and is never modified after that. The caller must hold an extra | |
244 | * reference on the shared region to prevent it from being destroyed. | |
245 | */ | |
246 | mach_vm_size_t | |
247 | vm_shared_region_size( | |
248 | vm_shared_region_t shared_region) | |
249 | { | |
250 | SHARED_REGION_TRACE_DEBUG( | |
251 | ("shared_region: -> size(%p)\n", | |
252 | shared_region)); | |
253 | assert(shared_region->sr_ref_count > 1); | |
254 | SHARED_REGION_TRACE_DEBUG( | |
255 | ("shared_region: size(%p) <- 0x%llx\n", | |
256 | shared_region, (long long)shared_region->sr_size)); | |
257 | return shared_region->sr_size; | |
258 | } | |
259 | ||
260 | /* | |
261 | * Get the memory entry of the shared region. | |
262 | * That's the "memory object" that needs to be mapped in the process's address | |
263 | * space. | |
264 | * No need to lock since this data is set when the shared region is | |
265 | * created and is never modified after that. The caller must hold an extra | |
266 | * reference on the shared region to prevent it from being destroyed. | |
267 | */ | |
268 | ipc_port_t | |
269 | vm_shared_region_mem_entry( | |
270 | vm_shared_region_t shared_region) | |
271 | { | |
272 | SHARED_REGION_TRACE_DEBUG( | |
273 | ("shared_region: -> mem_entry(%p)\n", | |
274 | shared_region)); | |
275 | assert(shared_region->sr_ref_count > 1); | |
276 | SHARED_REGION_TRACE_DEBUG( | |
277 | ("shared_region: mem_entry(%p) <- %p\n", | |
278 | shared_region, shared_region->sr_mem_entry)); | |
279 | return shared_region->sr_mem_entry; | |
280 | } | |
281 | ||
282 | /* | |
283 | * Set the shared region the process should use. | |
284 | * A NULL new shared region means that we just want to release the old | |
285 | * shared region. | |
286 | * The caller should already have an extra reference on the new shared region | |
287 | * (if any). We release a reference on the old shared region (if any). | |
288 | */ | |
289 | void | |
290 | vm_shared_region_set( | |
291 | task_t task, | |
292 | vm_shared_region_t new_shared_region) | |
293 | { | |
294 | vm_shared_region_t old_shared_region; | |
295 | ||
296 | SHARED_REGION_TRACE_DEBUG( | |
297 | ("shared_region: -> set(%p, %p)\n", | |
298 | task, new_shared_region)); | |
299 | ||
300 | task_lock(task); | |
301 | vm_shared_region_lock(); | |
302 | ||
303 | old_shared_region = task->shared_region; | |
304 | if (new_shared_region) { | |
305 | assert(new_shared_region->sr_ref_count > 0); | |
306 | } | |
307 | ||
308 | task->shared_region = new_shared_region; | |
309 | ||
310 | vm_shared_region_unlock(); | |
311 | task_unlock(task); | |
312 | ||
313 | if (old_shared_region) { | |
314 | assert(old_shared_region->sr_ref_count > 0); | |
315 | vm_shared_region_deallocate(old_shared_region); | |
316 | } | |
317 | ||
318 | SHARED_REGION_TRACE_DEBUG( | |
319 | ("shared_region: set(%p) <- old=%p new=%p\n", | |
320 | task, old_shared_region, new_shared_region)); | |
321 | } | |
322 | ||
323 | /* | |
324 | * Look up the shared region for the desired environment. | |
325 | * If none is found, create a new (empty) one. | |
326 | * Grab an extra reference on the returned shared region, to make sure | |
327 | * it doesn't get destroyed before the caller is done with it. The caller | |
328 | * is responsible for consuming that extra reference if necessary. | |
329 | */ | |
330 | vm_shared_region_t | |
331 | vm_shared_region_lookup( | |
332 | void *root_dir, | |
333 | cpu_type_t cputype, | |
334 | boolean_t is_64bit) | |
335 | { | |
336 | vm_shared_region_t shared_region; | |
337 | vm_shared_region_t new_shared_region; | |
338 | ||
339 | SHARED_REGION_TRACE_DEBUG( | |
340 | ("shared_region: -> lookup(root=%p,cpu=%d,64bit=%d)\n", | |
341 | root_dir, cputype, is_64bit)); | |
342 | ||
343 | shared_region = NULL; | |
344 | new_shared_region = NULL; | |
345 | ||
346 | vm_shared_region_lock(); | |
347 | for (;;) { | |
348 | queue_iterate(&vm_shared_region_queue, | |
349 | shared_region, | |
350 | vm_shared_region_t, | |
351 | sr_q) { | |
352 | assert(shared_region->sr_ref_count > 0); | |
353 | if (shared_region->sr_cpu_type == cputype && | |
354 | shared_region->sr_root_dir == root_dir && | |
355 | shared_region->sr_64bit == is_64bit) { | |
356 | /* found a match ! */ | |
357 | vm_shared_region_reference_locked(shared_region); | |
358 | goto done; | |
359 | } | |
360 | } | |
361 | if (new_shared_region == NULL) { | |
362 | /* no match: create a new one */ | |
363 | vm_shared_region_unlock(); | |
364 | new_shared_region = vm_shared_region_create(root_dir, | |
365 | cputype, | |
366 | is_64bit); | |
367 | /* do the lookup again, in case we lost a race */ | |
368 | vm_shared_region_lock(); | |
369 | continue; | |
370 | } | |
371 | /* still no match: use our new one */ | |
372 | shared_region = new_shared_region; | |
373 | new_shared_region = NULL; | |
374 | queue_enter(&vm_shared_region_queue, | |
375 | shared_region, | |
376 | vm_shared_region_t, | |
377 | sr_q); | |
378 | break; | |
379 | } | |
380 | ||
381 | done: | |
382 | vm_shared_region_unlock(); | |
383 | ||
384 | if (new_shared_region) { | |
385 | /* | |
386 | * We lost a race with someone else to create a new shared | |
387 | * region for that environment. Get rid of our unused one. | |
388 | */ | |
389 | assert(new_shared_region->sr_ref_count == 1); | |
390 | new_shared_region->sr_ref_count--; | |
391 | vm_shared_region_destroy(new_shared_region); | |
392 | new_shared_region = NULL; | |
393 | } | |
394 | ||
395 | SHARED_REGION_TRACE_DEBUG( | |
396 | ("shared_region: lookup(root=%p,cpu=%d,64bit=%d) <- %p\n", | |
397 | root_dir, cputype, is_64bit, shared_region)); | |
398 | ||
399 | assert(shared_region->sr_ref_count > 0); | |
400 | return shared_region; | |
401 | } | |
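/*
 * Note on the loop above: the new region is created without holding
 * vm_shared_region_lock, since vm_shared_region_create() may block, and the
 * queue is searched again after re-taking the lock.  If another thread
 * created and queued a matching region in the meantime, the spare one is
 * destroyed once the lock has been dropped.
 */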
402 | ||
403 | /* | |
404 | * Take an extra reference on a shared region. | |
405 | * The vm_shared_region_lock should already be held by the caller. | |
406 | */ | |
407 | static void | |
408 | vm_shared_region_reference_locked( | |
409 | vm_shared_region_t shared_region) | |
410 | { | |
411 | #if DEBUG | |
412 | lck_mtx_assert(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED); | |
413 | #endif | |
414 | ||
415 | SHARED_REGION_TRACE_DEBUG( | |
416 | ("shared_region: -> reference_locked(%p)\n", | |
417 | shared_region)); | |
418 | assert(shared_region->sr_ref_count > 0); | |
419 | shared_region->sr_ref_count++; | |
b0d623f7 A |
420 | |
421 | if (shared_region->sr_timer_call != NULL) { | |
422 | boolean_t cancelled; | |
423 | ||
424 | /* cancel and free any pending timeout */ | |
425 | cancelled = thread_call_cancel(shared_region->sr_timer_call); | |
426 | if (cancelled) { | |
427 | thread_call_free(shared_region->sr_timer_call); | |
428 | shared_region->sr_timer_call = NULL; | |
429 | /* release the reference held by the cancelled timer */ | |
430 | shared_region->sr_ref_count--; | |
431 | } else { | |
432 | /* the timer will drop the reference and free itself */ | |
433 | } | |
434 | } | |
435 | ||
2d21ac55 A |
436 | SHARED_REGION_TRACE_DEBUG( |
437 | ("shared_region: reference_locked(%p) <- %d\n", | |
438 | shared_region, shared_region->sr_ref_count)); | |
439 | } | |
440 | ||
441 | /* | |
442 | * Release a reference on the shared region. | |
443 | * Destroy it if there are no references left. | |
444 | */ | |
445 | void | |
446 | vm_shared_region_deallocate( | |
447 | vm_shared_region_t shared_region) | |
448 | { | |
449 | SHARED_REGION_TRACE_DEBUG( | |
450 | ("shared_region: -> deallocate(%p)\n", | |
451 | shared_region)); | |
452 | ||
453 | vm_shared_region_lock(); | |
454 | ||
455 | assert(shared_region->sr_ref_count > 0); | |
456 | ||
457 | if (shared_region->sr_root_dir == NULL) { | |
458 | /* | |
459 | * Local (i.e. based on the boot volume) shared regions | |
460 | * can persist or not based on the "shared_region_persistence" | |
461 | * sysctl. | |
462 | * Make sure that this one complies. | |
463 | */ | |
464 | if (shared_region_persistence && | |
465 | !shared_region->sr_persists) { | |
466 | /* make this one persistent */ | |
467 | shared_region->sr_ref_count++; | |
468 | shared_region->sr_persists = TRUE; | |
469 | } else if (!shared_region_persistence && | |
470 | shared_region->sr_persists) { | |
471 | /* make this one no longer persistent */ | |
472 | assert(shared_region->sr_ref_count > 1); | |
473 | shared_region->sr_ref_count--; | |
474 | shared_region->sr_persists = FALSE; | |
475 | } | |
476 | } | |
477 | ||
478 | assert(shared_region->sr_ref_count > 0); | |
479 | shared_region->sr_ref_count--; | |
480 | SHARED_REGION_TRACE_DEBUG( | |
481 | ("shared_region: deallocate(%p): ref now %d\n", | |
482 | shared_region, shared_region->sr_ref_count)); | |
483 | ||
484 | if (shared_region->sr_ref_count == 0) { | |
b0d623f7 A |
485 | uint64_t deadline; |
486 | ||
487 | if (shared_region->sr_timer_call == NULL) { | |
488 | /* hold one reference for the timer */ | |
489 | assert(! shared_region->sr_mapping_in_progress); | |
490 | shared_region->sr_ref_count++; | |
491 | ||
492 | /* set up the timer */ | |
493 | shared_region->sr_timer_call = thread_call_allocate( | |
494 | (thread_call_func_t) vm_shared_region_timeout, | |
495 | (thread_call_param_t) shared_region); | |
496 | ||
497 | /* schedule the timer */ | |
498 | clock_interval_to_deadline(shared_region_destroy_delay, | |
499 | 1000 * 1000 * 1000, | |
500 | &deadline); | |
501 | thread_call_enter_delayed(shared_region->sr_timer_call, | |
502 | deadline); | |
503 | ||
504 | SHARED_REGION_TRACE_DEBUG( | |
505 | ("shared_region: deallocate(%p): armed timer\n", | |
506 | shared_region)); | |
507 | ||
508 | vm_shared_region_unlock(); | |
509 | } else { | |
510 | /* timer expired: let go of this shared region */ | |
511 | ||
512 | /* | |
513 | * Remove it from the queue first, so no one can find | |
514 | * it... | |
515 | */ | |
516 | queue_remove(&vm_shared_region_queue, | |
517 | shared_region, | |
518 | vm_shared_region_t, | |
519 | sr_q); | |
520 | vm_shared_region_unlock(); | |
521 | /* ... and destroy it */ | |
522 | vm_shared_region_destroy(shared_region); | |
523 | shared_region = NULL; | |
524 | } | |
2d21ac55 A |
525 | } else { |
526 | vm_shared_region_unlock(); | |
527 | } | |
528 | ||
529 | SHARED_REGION_TRACE_DEBUG( | |
530 | ("shared_region: deallocate(%p) <-\n", | |
531 | shared_region)); | |
532 | } | |
533 | ||
b0d623f7 A |
534 | void |
535 | vm_shared_region_timeout( | |
536 | thread_call_param_t param0, | |
537 | __unused thread_call_param_t param1) | |
538 | { | |
539 | vm_shared_region_t shared_region; | |
540 | ||
541 | shared_region = (vm_shared_region_t) param0; | |
542 | ||
543 | vm_shared_region_deallocate(shared_region); | |
544 | } | |
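/*
 * Delayed reclaim, in summary:
 *  - when the last reference is dropped, vm_shared_region_deallocate()
 *    keeps the region alive by taking one reference on behalf of a
 *    thread_call armed to fire shared_region_destroy_delay seconds later;
 *  - if a new task picks the region up before the timer fires,
 *    vm_shared_region_reference_locked() cancels the timer and releases
 *    the timer's reference;
 *  - if the timer does fire, vm_shared_region_timeout() calls
 *    vm_shared_region_deallocate() again, which this time finds
 *    sr_timer_call already set, removes the region from the queue and
 *    destroys it.
 */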
545 | ||
2d21ac55 A |
546 | /* |
547 | * Create a new (empty) shared region for a new environment. | |
548 | */ | |
549 | static vm_shared_region_t | |
550 | vm_shared_region_create( | |
551 | void *root_dir, | |
552 | cpu_type_t cputype, | |
553 | boolean_t is_64bit) | |
554 | { | |
555 | kern_return_t kr; | |
556 | vm_named_entry_t mem_entry; | |
557 | ipc_port_t mem_entry_port; | |
558 | vm_shared_region_t shared_region; | |
559 | vm_map_t sub_map; | |
560 | mach_vm_offset_t base_address, pmap_nesting_start; | |
561 | mach_vm_size_t size, pmap_nesting_size; | |
562 | ||
563 | SHARED_REGION_TRACE_DEBUG( | |
564 | ("shared_region: -> create(root=%p,cpu=%d,64bit=%d)\n", | |
565 | root_dir, cputype, is_64bit)); | |
566 | ||
567 | base_address = 0; | |
568 | size = 0; | |
569 | mem_entry = NULL; | |
570 | mem_entry_port = IPC_PORT_NULL; | |
571 | sub_map = VM_MAP_NULL; | |
572 | ||
573 | /* create a new shared region structure... */ | |
574 | shared_region = kalloc(sizeof (*shared_region)); | |
575 | if (shared_region == NULL) { | |
576 | SHARED_REGION_TRACE_ERROR( | |
577 | ("shared_region: create: couldn't allocate\n")); | |
578 | goto done; | |
579 | } | |
580 | ||
581 | /* figure out the correct settings for the desired environment */ | |
582 | if (is_64bit) { | |
583 | switch (cputype) { | |
584 | case CPU_TYPE_I386: | |
585 | base_address = SHARED_REGION_BASE_X86_64; | |
586 | size = SHARED_REGION_SIZE_X86_64; | |
587 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64; | |
588 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64; | |
589 | break; | |
590 | case CPU_TYPE_POWERPC: | |
591 | base_address = SHARED_REGION_BASE_PPC64; | |
592 | size = SHARED_REGION_SIZE_PPC64; | |
593 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64; | |
594 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64; | |
595 | break; | |
596 | default: | |
597 | SHARED_REGION_TRACE_ERROR( | |
598 | ("shared_region: create: unknown cpu type %d\n", | |
599 | cputype)); | |
600 | kfree(shared_region, sizeof (*shared_region)); | |
601 | shared_region = NULL; | |
602 | goto done; | |
603 | } | |
604 | } else { | |
605 | switch (cputype) { | |
606 | case CPU_TYPE_I386: | |
607 | base_address = SHARED_REGION_BASE_I386; | |
608 | size = SHARED_REGION_SIZE_I386; | |
609 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386; | |
610 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386; | |
611 | break; | |
612 | case CPU_TYPE_POWERPC: | |
613 | base_address = SHARED_REGION_BASE_PPC; | |
614 | size = SHARED_REGION_SIZE_PPC; | |
615 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC; | |
616 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC; | |
617 | break; | |
618 | #ifdef CPU_TYPE_ARM | |
619 | case CPU_TYPE_ARM: | |
620 | base_address = SHARED_REGION_BASE_ARM; | |
621 | size = SHARED_REGION_SIZE_ARM; | |
622 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM; | |
623 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM; | |
624 | break; | |
625 | #endif /* CPU_TYPE_ARM */ | |
626 | default: | |
627 | SHARED_REGION_TRACE_ERROR( | |
628 | ("shared_region: create: unknown cpu type %d\n", | |
629 | cputype)); | |
630 | kfree(shared_region, sizeof (*shared_region)); | |
631 | shared_region = NULL; | |
632 | goto done; | |
633 | ||
634 | } | |
635 | } | |
636 | ||
637 | /* create a memory entry structure and a Mach port handle */ | |
638 | kr = mach_memory_entry_allocate(&mem_entry, | |
639 | &mem_entry_port); | |
640 | if (kr != KERN_SUCCESS) { | |
641 | kfree(shared_region, sizeof (*shared_region)); | |
642 | shared_region = NULL; | |
643 | SHARED_REGION_TRACE_ERROR( | |
644 | ("shared_region: create: " | |
645 | "couldn't allocate mem_entry\n")); | |
646 | goto done; | |
647 | } | |
648 | ||
649 | /* create a VM sub map and its pmap */ | |
316670eb | 650 | sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit), |
2d21ac55 A |
651 | 0, size, |
652 | TRUE); | |
653 | if (sub_map == VM_MAP_NULL) { | |
654 | ipc_port_release_send(mem_entry_port); | |
655 | kfree(shared_region, sizeof (*shared_region)); | |
656 | shared_region = NULL; | |
657 | SHARED_REGION_TRACE_ERROR( | |
658 | ("shared_region: create: " | |
659 | "couldn't allocate map\n")); | |
660 | goto done; | |
661 | } | |
662 | ||
663 | /* make the memory entry point to the VM sub map */ | |
664 | mem_entry->is_sub_map = TRUE; | |
665 | mem_entry->backing.map = sub_map; | |
666 | mem_entry->size = size; | |
667 | mem_entry->protection = VM_PROT_ALL; | |
668 | ||
669 | /* make the shared region point at the memory entry */ | |
670 | shared_region->sr_mem_entry = mem_entry_port; | |
671 | ||
672 | /* fill in the shared region's environment and settings */ | |
673 | shared_region->sr_base_address = base_address; | |
674 | shared_region->sr_size = size; | |
675 | shared_region->sr_pmap_nesting_start = pmap_nesting_start; | |
676 | shared_region->sr_pmap_nesting_size = pmap_nesting_size; | |
677 | shared_region->sr_cpu_type = cputype; | |
678 | shared_region->sr_64bit = is_64bit; | |
679 | shared_region->sr_root_dir = root_dir; | |
680 | ||
681 | queue_init(&shared_region->sr_q); | |
682 | shared_region->sr_mapping_in_progress = FALSE; | |
683 | shared_region->sr_persists = FALSE; | |
b0d623f7 | 684 | shared_region->sr_timer_call = NULL; |
2d21ac55 A |
685 | shared_region->sr_first_mapping = (mach_vm_offset_t) -1; |
686 | ||
687 | /* grab a reference for the caller */ | |
688 | shared_region->sr_ref_count = 1; | |
689 | ||
690 | done: | |
691 | if (shared_region) { | |
692 | SHARED_REGION_TRACE_INFO( | |
693 | ("shared_region: create(root=%p,cpu=%d,64bit=%d," | |
694 | "base=0x%llx,size=0x%llx) <- " | |
695 | "%p mem=(%p,%p) map=%p pmap=%p\n", | |
696 | root_dir, cputype, is_64bit, (long long)base_address, | |
697 | (long long)size, shared_region, | |
698 | mem_entry_port, mem_entry, sub_map, sub_map->pmap)); | |
699 | } else { | |
700 | SHARED_REGION_TRACE_INFO( | |
701 | ("shared_region: create(root=%p,cpu=%d,64bit=%d," | |
702 | "base=0x%llx,size=0x%llx) <- NULL", | |
703 | root_dir, cputype, is_64bit, (long long)base_address, | |
704 | (long long)size)); | |
705 | } | |
706 | return shared_region; | |
707 | } | |
708 | ||
709 | /* | |
710 | * Destroy a now-unused shared region. | |
711 | * The shared region is no longer in the queue and cannot be looked up. | |
712 | */ | |
713 | static void | |
714 | vm_shared_region_destroy( | |
715 | vm_shared_region_t shared_region) | |
716 | { | |
717 | vm_named_entry_t mem_entry; | |
718 | vm_map_t map; | |
719 | ||
720 | SHARED_REGION_TRACE_INFO( | |
721 | ("shared_region: -> destroy(%p) (root=%p,cpu=%d,64bit=%d)\n", | |
722 | shared_region, | |
723 | shared_region->sr_root_dir, | |
724 | shared_region->sr_cpu_type, | |
725 | shared_region->sr_64bit)); | |
726 | ||
727 | assert(shared_region->sr_ref_count == 0); | |
728 | assert(!shared_region->sr_persists); | |
729 | ||
730 | mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject; | |
731 | assert(mem_entry->is_sub_map); | |
732 | assert(!mem_entry->internal); | |
733 | assert(!mem_entry->is_pager); | |
734 | map = mem_entry->backing.map; | |
735 | ||
736 | /* | |
737 | * Clean up the pmap first. The virtual addresses that were | |
738 | * entered in this possibly "nested" pmap may differ from the | |
739 | * VM map's min and max offsets, because the VM sub map is usually | |
740 | * mapped at a non-zero offset in the processes' main VM maps, | |
741 | * so the clean-up done in vm_map_destroy() would not be | |
742 | * enough. | |
743 | */ | |
744 | if (map->pmap) { | |
745 | pmap_remove(map->pmap, | |
746 | shared_region->sr_base_address, | |
747 | (shared_region->sr_base_address + | |
748 | shared_region->sr_size)); | |
749 | } | |
750 | ||
751 | /* | |
752 | * Release our (one and only) handle on the memory entry. | |
753 | * This will generate a no-senders notification, which will be processed | |
754 | * by ipc_kobject_notify(), which will release the one and only | |
755 | * reference on the memory entry and cause it to be destroyed, along | |
756 | * with the VM sub map and its pmap. | |
757 | */ | |
758 | mach_memory_entry_port_release(shared_region->sr_mem_entry); | |
759 | mem_entry = NULL; | |
760 | shared_region->sr_mem_entry = IPC_PORT_NULL; | |
761 | ||
b0d623f7 A |
762 | if (shared_region->sr_timer_call) { |
763 | thread_call_free(shared_region->sr_timer_call); | |
764 | } | |
765 | ||
6d2010ae A |
766 | if ((slide_info.slide_info_entry != NULL) && (slide_info.sr == shared_region)) { |
767 | kmem_free(kernel_map, | |
768 | (vm_offset_t) slide_info.slide_info_entry, | |
769 | (vm_size_t) slide_info.slide_info_size); | |
770 | vm_object_deallocate(slide_info.slide_object); | |
771 | slide_info.slide_object = NULL; | |
772 | slide_info.start = 0; | |
773 | slide_info.end = 0; | |
774 | slide_info.slide = 0; | |
775 | slide_info.sr = NULL; | |
776 | slide_info.slide_info_entry = NULL; | |
777 | slide_info.slide_info_size = 0; | |
778 | shared_region_completed_slide = FALSE; | |
779 | } | |
780 | ||
2d21ac55 A |
781 | /* release the shared region structure... */ |
782 | kfree(shared_region, sizeof (*shared_region)); | |
6d2010ae | 783 | |
2d21ac55 A |
784 | SHARED_REGION_TRACE_DEBUG( |
785 | ("shared_region: destroy(%p) <-\n", | |
786 | shared_region)); | |
787 | shared_region = NULL; | |
788 | ||
789 | } | |
790 | ||
791 | /* | |
792 | * Gets the address of the first (in time) mapping in the shared region. | |
793 | */ | |
794 | kern_return_t | |
795 | vm_shared_region_start_address( | |
796 | vm_shared_region_t shared_region, | |
797 | mach_vm_offset_t *start_address) | |
798 | { | |
799 | kern_return_t kr; | |
800 | mach_vm_offset_t sr_base_address; | |
801 | mach_vm_offset_t sr_first_mapping; | |
802 | ||
803 | SHARED_REGION_TRACE_DEBUG( | |
804 | ("shared_region: -> start_address(%p)\n", | |
805 | shared_region)); | |
806 | assert(shared_region->sr_ref_count > 1); | |
807 | ||
808 | vm_shared_region_lock(); | |
809 | ||
810 | /* | |
811 | * Wait if there's another thread establishing a mapping | |
812 | * in this shared region right when we're looking at it. | |
813 | * We want a consistent view of the map... | |
814 | */ | |
815 | while (shared_region->sr_mapping_in_progress) { | |
816 | /* wait for our turn... */ | |
817 | assert(shared_region->sr_ref_count > 1); | |
818 | vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, | |
819 | THREAD_UNINT); | |
820 | } | |
821 | assert(! shared_region->sr_mapping_in_progress); | |
822 | assert(shared_region->sr_ref_count > 1); | |
823 | ||
824 | sr_base_address = shared_region->sr_base_address; | |
825 | sr_first_mapping = shared_region->sr_first_mapping; | |
826 | ||
827 | if (sr_first_mapping == (mach_vm_offset_t) -1) { | |
828 | /* shared region is empty */ | |
829 | kr = KERN_INVALID_ADDRESS; | |
830 | } else { | |
831 | kr = KERN_SUCCESS; | |
832 | *start_address = sr_base_address + sr_first_mapping; | |
833 | } | |
834 | ||
835 | vm_shared_region_unlock(); | |
836 | ||
837 | SHARED_REGION_TRACE_DEBUG( | |
838 | ("shared_region: start_address(%p) <- 0x%llx\n", | |
839 | shared_region, (long long)shared_region->sr_base_address)); | |
840 | ||
841 | return kr; | |
842 | } | |
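/*
 * Note on "sr_mapping_in_progress": it is a condition protected by
 * vm_shared_region_lock.  Readers that only need a consistent view, like
 * vm_shared_region_start_address() above, simply wait for it to clear:
 *
 *	vm_shared_region_lock();
 *	while (shared_region->sr_mapping_in_progress) {
 *		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
 *			THREAD_UNINT);
 *	}
 *
 * Writers (vm_shared_region_undo_mappings() and vm_shared_region_map_file()
 * below) additionally claim the flag by setting it to TRUE before dropping
 * the lock, then clear it and thread_wakeup() the same event when done.
 */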
6d2010ae A |
843 | |
844 | void | |
845 | vm_shared_region_undo_mappings( | |
846 | vm_map_t sr_map, | |
847 | mach_vm_offset_t sr_base_address, | |
848 | struct shared_file_mapping_np *mappings, | |
849 | unsigned int mappings_count) | |
850 | { | |
851 | unsigned int j = 0; | |
852 | vm_shared_region_t shared_region = NULL; | |
853 | boolean_t reset_shared_region_state = FALSE; | |
316670eb | 854 | |
6d2010ae A |
855 | shared_region = vm_shared_region_get(current_task()); |
856 | if (shared_region == NULL) { | |
316670eb | 857 | printf("Failed to undo mappings because of NULL shared region.\n"); |
6d2010ae A |
858 | return; |
859 | } | |
316670eb | 860 | |
6d2010ae A |
861 | |
862 | if (sr_map == NULL) { | |
863 | ipc_port_t sr_handle; | |
864 | vm_named_entry_t sr_mem_entry; | |
865 | ||
866 | vm_shared_region_lock(); | |
867 | assert(shared_region->sr_ref_count > 1); | |
868 | ||
869 | while (shared_region->sr_mapping_in_progress) { | |
870 | /* wait for our turn... */ | |
871 | vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, | |
872 | THREAD_UNINT); | |
873 | } | |
874 | assert(! shared_region->sr_mapping_in_progress); | |
875 | assert(shared_region->sr_ref_count > 1); | |
876 | /* let others know we're working in this shared region */ | |
877 | shared_region->sr_mapping_in_progress = TRUE; | |
878 | ||
879 | vm_shared_region_unlock(); | |
880 | ||
881 | reset_shared_region_state = TRUE; | |
882 | ||
883 | /* no need to lock because this data is never modified... */ | |
884 | sr_handle = shared_region->sr_mem_entry; | |
885 | sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject; | |
886 | sr_map = sr_mem_entry->backing.map; | |
887 | sr_base_address = shared_region->sr_base_address; | |
888 | } | |
889 | /* | |
890 | * Undo the mappings we've established so far. | |
891 | */ | |
892 | for (j = 0; j < mappings_count; j++) { | |
893 | kern_return_t kr2; | |
894 | ||
895 | if (mappings[j].sfm_size == 0) { | |
896 | /* | |
897 | * We didn't establish this | |
898 | * mapping, so nothing to undo. | |
899 | */ | |
900 | continue; | |
901 | } | |
902 | SHARED_REGION_TRACE_INFO( | |
903 | ("shared_region: mapping[%d]: " | |
904 | "address:0x%016llx " | |
905 | "size:0x%016llx " | |
906 | "offset:0x%016llx " | |
907 | "maxprot:0x%x prot:0x%x: " | |
908 | "undoing...\n", | |
909 | j, | |
910 | (long long)mappings[j].sfm_address, | |
911 | (long long)mappings[j].sfm_size, | |
912 | (long long)mappings[j].sfm_file_offset, | |
913 | mappings[j].sfm_max_prot, | |
914 | mappings[j].sfm_init_prot)); | |
915 | kr2 = mach_vm_deallocate( | |
916 | sr_map, | |
917 | (mappings[j].sfm_address - | |
918 | sr_base_address), | |
919 | mappings[j].sfm_size); | |
920 | assert(kr2 == KERN_SUCCESS); | |
921 | } | |
922 | ||
923 | /* | |
924 | * This is how check_np() knows if the shared region | |
925 | * is mapped. So clear it here. | |
926 | */ | |
927 | shared_region->sr_first_mapping = (mach_vm_offset_t) -1; | |
928 | ||
929 | if (reset_shared_region_state) { | |
930 | vm_shared_region_lock(); | |
931 | assert(shared_region->sr_ref_count > 1); | |
932 | assert(shared_region->sr_mapping_in_progress); | |
933 | /* we're done working on that shared region */ | |
934 | shared_region->sr_mapping_in_progress = FALSE; | |
935 | thread_wakeup((event_t) &shared_region->sr_mapping_in_progress); | |
936 | vm_shared_region_unlock(); | |
937 | reset_shared_region_state = FALSE; | |
938 | } | |
939 | ||
940 | vm_shared_region_deallocate(shared_region); | |
941 | } | |
942 | ||
2d21ac55 A |
943 | /* |
944 | * Establish some mappings of a file in the shared region. | |
945 | * This is used by "dyld" via the shared_region_map_np() system call | |
946 | * to populate the shared region with the appropriate shared cache. | |
947 | * | |
948 | * One could also call it several times to incrementally load several | |
949 | * libraries, as long as they do not overlap. | |
950 | * It will return KERN_SUCCESS if the mappings were successfully established | |
951 | * or if they were already established identically by another process. | |
952 | */ | |
953 | kern_return_t | |
954 | vm_shared_region_map_file( | |
955 | vm_shared_region_t shared_region, | |
956 | unsigned int mappings_count, | |
957 | struct shared_file_mapping_np *mappings, | |
958 | memory_object_control_t file_control, | |
959 | memory_object_size_t file_size, | |
6d2010ae A |
960 | void *root_dir, |
961 | struct shared_file_mapping_np *mapping_to_slide) | |
2d21ac55 A |
962 | { |
963 | kern_return_t kr; | |
964 | vm_object_t file_object; | |
965 | ipc_port_t sr_handle; | |
966 | vm_named_entry_t sr_mem_entry; | |
967 | vm_map_t sr_map; | |
968 | mach_vm_offset_t sr_base_address; | |
969 | unsigned int i; | |
970 | mach_port_t map_port; | |
316670eb | 971 | vm_map_offset_t target_address; |
4a3eedf9 A |
972 | vm_object_t object; |
973 | vm_object_size_t obj_size; | |
6d2010ae | 974 | boolean_t found_mapping_to_slide = FALSE; |
4a3eedf9 | 975 | |
2d21ac55 A |
976 | |
977 | kr = KERN_SUCCESS; | |
978 | ||
979 | vm_shared_region_lock(); | |
980 | assert(shared_region->sr_ref_count > 1); | |
981 | ||
982 | if (shared_region->sr_root_dir != root_dir) { | |
983 | /* | |
984 | * This shared region doesn't match the current root | |
985 | * directory of this process. Deny the mapping to | |
986 | * avoid tainting the shared region with something that | |
987 | * doesn't quite belong in it. | |
988 | */ | |
989 | vm_shared_region_unlock(); | |
990 | kr = KERN_PROTECTION_FAILURE; | |
991 | goto done; | |
992 | } | |
993 | ||
994 | /* | |
995 | * Make sure we handle only one mapping at a time in a given | |
996 | * shared region, to avoid race conditions. This should not | |
997 | * happen frequently... | |
998 | */ | |
999 | while (shared_region->sr_mapping_in_progress) { | |
1000 | /* wait for our turn... */ | |
1001 | vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, | |
1002 | THREAD_UNINT); | |
1003 | } | |
1004 | assert(! shared_region->sr_mapping_in_progress); | |
1005 | assert(shared_region->sr_ref_count > 1); | |
1006 | /* let others know we're working in this shared region */ | |
1007 | shared_region->sr_mapping_in_progress = TRUE; | |
1008 | ||
1009 | vm_shared_region_unlock(); | |
1010 | ||
1011 | /* no need to lock because this data is never modified... */ | |
1012 | sr_handle = shared_region->sr_mem_entry; | |
1013 | sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject; | |
1014 | sr_map = sr_mem_entry->backing.map; | |
1015 | sr_base_address = shared_region->sr_base_address; | |
1016 | ||
1017 | SHARED_REGION_TRACE_DEBUG( | |
1018 | ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n", | |
1019 | shared_region, mappings_count, mappings, | |
1020 | file_control, file_size)); | |
1021 | ||
1022 | /* get the VM object associated with the file to be mapped */ | |
1023 | file_object = memory_object_control_to_vm_object(file_control); | |
1024 | ||
1025 | /* establish the mappings */ | |
1026 | for (i = 0; i < mappings_count; i++) { | |
1027 | SHARED_REGION_TRACE_INFO( | |
1028 | ("shared_region: mapping[%d]: " | |
1029 | "address:0x%016llx size:0x%016llx offset:0x%016llx " | |
1030 | "maxprot:0x%x prot:0x%x\n", | |
1031 | i, | |
1032 | (long long)mappings[i].sfm_address, | |
1033 | (long long)mappings[i].sfm_size, | |
1034 | (long long)mappings[i].sfm_file_offset, | |
1035 | mappings[i].sfm_max_prot, | |
1036 | mappings[i].sfm_init_prot)); | |
1037 | ||
1038 | if (mappings[i].sfm_init_prot & VM_PROT_ZF) { | |
1039 | /* zero-filled memory */ | |
1040 | map_port = MACH_PORT_NULL; | |
1041 | } else { | |
1042 | /* file-backed memory */ | |
1043 | map_port = (ipc_port_t) file_object->pager; | |
1044 | } | |
6d2010ae A |
1045 | |
1046 | if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) { | |
1047 | /* | |
1048 | * This is the mapping that needs to be slid. | |
1049 | */ | |
1050 | if (found_mapping_to_slide == TRUE) { | |
1051 | SHARED_REGION_TRACE_INFO( | |
1052 | ("shared_region: mapping[%d]: " | |
1053 | "address:0x%016llx size:0x%016llx " | |
1054 | "offset:0x%016llx " | |
1055 | "maxprot:0x%x prot:0x%x " | |
1056 | "will not be slid as only one such mapping is allowed...\n", | |
1057 | i, | |
1058 | (long long)mappings[i].sfm_address, | |
1059 | (long long)mappings[i].sfm_size, | |
1060 | (long long)mappings[i].sfm_file_offset, | |
1061 | mappings[i].sfm_max_prot, | |
1062 | mappings[i].sfm_init_prot)); | |
1063 | } else { | |
1064 | if (mapping_to_slide != NULL) { | |
1065 | mapping_to_slide->sfm_file_offset = mappings[i].sfm_file_offset; | |
1066 | mapping_to_slide->sfm_size = mappings[i].sfm_size; | |
1067 | found_mapping_to_slide = TRUE; | |
1068 | } | |
1069 | } | |
1070 | } | |
2d21ac55 A |
1071 | |
1072 | /* mapping's address is relative to the shared region base */ | |
1073 | target_address = | |
1074 | mappings[i].sfm_address - sr_base_address; | |
1075 | ||
4a3eedf9 A |
1076 | /* establish that mapping, OK if it's "already" there */ |
1077 | if (map_port == MACH_PORT_NULL) { | |
1078 | /* | |
1079 | * We want to map some anonymous memory in a | |
1080 | * shared region. | |
1081 | * We have to create the VM object now, so that it | |
1082 | * can be mapped "copy-on-write". | |
1083 | */ | |
1084 | obj_size = vm_map_round_page(mappings[i].sfm_size); | |
1085 | object = vm_object_allocate(obj_size); | |
1086 | if (object == VM_OBJECT_NULL) { | |
1087 | kr = KERN_RESOURCE_SHORTAGE; | |
1088 | } else { | |
1089 | kr = vm_map_enter( | |
1090 | sr_map, | |
1091 | &target_address, | |
1092 | vm_map_round_page(mappings[i].sfm_size), | |
1093 | 0, | |
1094 | VM_FLAGS_FIXED | VM_FLAGS_ALREADY, | |
1095 | object, | |
1096 | 0, | |
1097 | TRUE, | |
1098 | mappings[i].sfm_init_prot & VM_PROT_ALL, | |
1099 | mappings[i].sfm_max_prot & VM_PROT_ALL, | |
1100 | VM_INHERIT_DEFAULT); | |
1101 | } | |
1102 | } else { | |
1103 | object = VM_OBJECT_NULL; /* no anonymous memory here */ | |
1104 | kr = vm_map_enter_mem_object( | |
1105 | sr_map, | |
1106 | &target_address, | |
1107 | vm_map_round_page(mappings[i].sfm_size), | |
1108 | 0, | |
1109 | VM_FLAGS_FIXED | VM_FLAGS_ALREADY, | |
1110 | map_port, | |
1111 | mappings[i].sfm_file_offset, | |
1112 | TRUE, | |
1113 | mappings[i].sfm_init_prot & VM_PROT_ALL, | |
1114 | mappings[i].sfm_max_prot & VM_PROT_ALL, | |
1115 | VM_INHERIT_DEFAULT); | |
2d21ac55 A |
1116 | } |
1117 | ||
4a3eedf9 A |
1118 | if (kr != KERN_SUCCESS) { |
1119 | if (map_port == MACH_PORT_NULL) { | |
1120 | /* | |
1121 | * Get rid of the VM object we just created | |
1122 | * but failed to map. | |
1123 | */ | |
1124 | vm_object_deallocate(object); | |
1125 | object = VM_OBJECT_NULL; | |
1126 | } | |
1127 | if (kr == KERN_MEMORY_PRESENT) { | |
1128 | /* | |
1129 | * This exact mapping was already there: | |
1130 | * that's fine. | |
1131 | */ | |
1132 | SHARED_REGION_TRACE_INFO( | |
1133 | ("shared_region: mapping[%d]: " | |
1134 | "address:0x%016llx size:0x%016llx " | |
1135 | "offset:0x%016llx " | |
1136 | "maxprot:0x%x prot:0x%x " | |
1137 | "already mapped...\n", | |
1138 | i, | |
1139 | (long long)mappings[i].sfm_address, | |
1140 | (long long)mappings[i].sfm_size, | |
1141 | (long long)mappings[i].sfm_file_offset, | |
1142 | mappings[i].sfm_max_prot, | |
1143 | mappings[i].sfm_init_prot)); | |
1144 | /* | |
1145 | * We didn't establish this mapping ourselves; | |
1146 | * let's reset its size, so that we do not | |
1147 | * attempt to undo it if an error occurs later. | |
1148 | */ | |
1149 | mappings[i].sfm_size = 0; | |
1150 | kr = KERN_SUCCESS; | |
1151 | } else { | |
4a3eedf9 A |
1152 | /* this mapping failed ! */ |
1153 | SHARED_REGION_TRACE_ERROR( | |
1154 | ("shared_region: mapping[%d]: " | |
1155 | "address:0x%016llx size:0x%016llx " | |
1156 | "offset:0x%016llx " | |
1157 | "maxprot:0x%x prot:0x%x failed 0x%x\n", | |
1158 | i, | |
1159 | (long long)mappings[i].sfm_address, | |
1160 | (long long)mappings[i].sfm_size, | |
1161 | (long long)mappings[i].sfm_file_offset, | |
1162 | mappings[i].sfm_max_prot, | |
1163 | mappings[i].sfm_init_prot, | |
1164 | kr)); | |
1165 | ||
6d2010ae | 1166 | vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i); |
4a3eedf9 A |
1167 | break; |
1168 | } | |
1169 | ||
1170 | } | |
1171 | ||
1172 | /* | |
1173 | * Record the first (chronologically) mapping in | |
1174 | * this shared region. | |
1175 | * We're protected by "sr_mapping_in_progress" here, | |
1176 | * so no need to lock "shared_region". | |
1177 | */ | |
2d21ac55 A |
1178 | if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) { |
1179 | shared_region->sr_first_mapping = target_address; | |
1180 | } | |
1181 | } | |
1182 | ||
1183 | vm_shared_region_lock(); | |
1184 | assert(shared_region->sr_ref_count > 1); | |
1185 | assert(shared_region->sr_mapping_in_progress); | |
1186 | /* we're done working on that shared region */ | |
1187 | shared_region->sr_mapping_in_progress = FALSE; | |
1188 | thread_wakeup((event_t) &shared_region->sr_mapping_in_progress); | |
1189 | vm_shared_region_unlock(); | |
1190 | ||
1191 | done: | |
1192 | SHARED_REGION_TRACE_DEBUG( | |
1193 | ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n", | |
1194 | shared_region, mappings_count, mappings, | |
1195 | file_control, file_size, kr)); | |
1196 | return kr; | |
1197 | } | |
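/*
 * Illustrative user-space sketch of the dyld-side sequence described in the
 * comment above vm_shared_region_map_file().  shared_region_check_np() and
 * shared_region_map_np() are private system calls; the prototypes, headers
 * and return-value conventions below are assumptions made for illustration
 * only and may not match a given SDK.
 */
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

struct shared_file_mapping_np;	/* as declared in the kernel's Mach headers */

extern int shared_region_check_np(uint64_t *start_address);
extern int shared_region_map_np(int fd, uint32_t count,
				const struct shared_file_mapping_np *mappings);

static int
example_populate_shared_cache(
	const char				*cache_path,
	const struct shared_file_mapping_np	*mappings,
	uint32_t				mappings_count)
{
	uint64_t	start_address;
	int		fd, error;

	if (shared_region_check_np(&start_address) == 0) {
		/* assumed: returns 0 when the region is already populated */
		return 0;
	}
	fd = open(cache_path, O_RDONLY);
	if (fd < 0) {
		return -1;
	}
	error = shared_region_map_np(fd, mappings_count, mappings);
	close(fd);
	return error;
}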
1198 | ||
1199 | /* | |
1200 | * Enter the appropriate shared region into "map" for "task". | |
1201 | * This involves looking up the shared region (and possibly creating a new | |
1202 | * one) for the desired environment, then mapping the VM sub map into the | |
1203 | * task's VM "map", with the appropriate level of pmap-nesting. | |
1204 | */ | |
1205 | kern_return_t | |
1206 | vm_shared_region_enter( | |
1207 | struct _vm_map *map, | |
1208 | struct task *task, | |
1209 | void *fsroot, | |
1210 | cpu_type_t cpu) | |
1211 | { | |
1212 | kern_return_t kr; | |
1213 | vm_shared_region_t shared_region; | |
1214 | vm_map_offset_t sr_address, sr_offset, target_address; | |
1215 | vm_map_size_t sr_size, mapping_size; | |
1216 | vm_map_offset_t sr_pmap_nesting_start; | |
1217 | vm_map_size_t sr_pmap_nesting_size; | |
1218 | ipc_port_t sr_handle; | |
1219 | boolean_t is_64bit; | |
1220 | ||
1221 | is_64bit = task_has_64BitAddr(task); | |
1222 | ||
1223 | SHARED_REGION_TRACE_DEBUG( | |
1224 | ("shared_region: -> " | |
1225 | "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d)\n", | |
1226 | map, task, fsroot, cpu, is_64bit)); | |
1227 | ||
1228 | /* lookup (create if needed) the shared region for this environment */ | |
1229 | shared_region = vm_shared_region_lookup(fsroot, cpu, is_64bit); | |
1230 | if (shared_region == NULL) { | |
1231 | /* this should not happen ! */ | |
1232 | SHARED_REGION_TRACE_ERROR( | |
1233 | ("shared_region: -> " | |
1234 | "enter(map=%p,task=%p,root=%p,cpu=%d,64bit=%d): " | |
1235 | "lookup failed !\n", | |
1236 | map, task, fsroot, cpu, is_64bit)); | |
1237 | //panic("shared_region_enter: lookup failed\n"); | |
1238 | return KERN_FAILURE; | |
1239 | } | |
1240 | ||
1241 | /* let the task use that shared region */ | |
1242 | vm_shared_region_set(task, shared_region); | |
1243 | ||
1244 | kr = KERN_SUCCESS; | |
1245 | /* no need to lock since this data is never modified */ | |
1246 | sr_address = shared_region->sr_base_address; | |
1247 | sr_size = shared_region->sr_size; | |
1248 | sr_handle = shared_region->sr_mem_entry; | |
1249 | sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start; | |
1250 | sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size; | |
1251 | ||
1252 | /* | |
1253 | * Start mapping the shared region's VM sub map into the task's VM map. | |
1254 | */ | |
1255 | sr_offset = 0; | |
1256 | ||
1257 | if (sr_pmap_nesting_start > sr_address) { | |
1258 | /* we need to map a range without pmap-nesting first */ | |
1259 | target_address = sr_address; | |
1260 | mapping_size = sr_pmap_nesting_start - sr_address; | |
1261 | kr = vm_map_enter_mem_object( | |
1262 | map, | |
1263 | &target_address, | |
1264 | mapping_size, | |
1265 | 0, | |
1266 | VM_FLAGS_FIXED, | |
1267 | sr_handle, | |
1268 | sr_offset, | |
1269 | TRUE, | |
1270 | VM_PROT_READ, | |
1271 | VM_PROT_ALL, | |
1272 | VM_INHERIT_SHARE); | |
1273 | if (kr != KERN_SUCCESS) { | |
1274 | SHARED_REGION_TRACE_ERROR( | |
1275 | ("shared_region: enter(%p,%p,%p,%d,%d): " | |
1276 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", | |
1277 | map, task, fsroot, cpu, is_64bit, | |
1278 | (long long)target_address, | |
1279 | (long long)mapping_size, sr_handle, kr)); | |
1280 | goto done; | |
1281 | } | |
1282 | SHARED_REGION_TRACE_DEBUG( | |
1283 | ("shared_region: enter(%p,%p,%p,%d,%d): " | |
1284 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", | |
1285 | map, task, fsroot, cpu, is_64bit, | |
1286 | (long long)target_address, (long long)mapping_size, | |
1287 | sr_handle, kr)); | |
1288 | sr_offset += mapping_size; | |
1289 | sr_size -= mapping_size; | |
1290 | } | |
1291 | /* | |
1292 | * We may need to map several pmap-nested portions, due to platform | |
1293 | * specific restrictions on pmap nesting. | |
1294 | * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias... | |
1295 | */ | |
1296 | for (; | |
1297 | sr_pmap_nesting_size > 0; | |
1298 | sr_offset += mapping_size, | |
1299 | sr_size -= mapping_size, | |
1300 | sr_pmap_nesting_size -= mapping_size) { | |
1301 | target_address = sr_address + sr_offset; | |
1302 | mapping_size = sr_pmap_nesting_size; | |
1303 | if (mapping_size > pmap_nesting_size_max) { | |
1304 | mapping_size = (vm_map_offset_t) pmap_nesting_size_max; | |
1305 | } | |
1306 | kr = vm_map_enter_mem_object( | |
1307 | map, | |
1308 | &target_address, | |
1309 | mapping_size, | |
1310 | 0, | |
1311 | (VM_FLAGS_FIXED | VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP)), | |
1312 | sr_handle, | |
1313 | sr_offset, | |
1314 | TRUE, | |
1315 | VM_PROT_READ, | |
1316 | VM_PROT_ALL, | |
1317 | VM_INHERIT_SHARE); | |
1318 | if (kr != KERN_SUCCESS) { | |
1319 | SHARED_REGION_TRACE_ERROR( | |
1320 | ("shared_region: enter(%p,%p,%p,%d,%d): " | |
1321 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", | |
1322 | map, task, fsroot, cpu, is_64bit, | |
1323 | (long long)target_address, | |
1324 | (long long)mapping_size, sr_handle, kr)); | |
1325 | goto done; | |
1326 | } | |
1327 | SHARED_REGION_TRACE_DEBUG( | |
1328 | ("shared_region: enter(%p,%p,%p,%d,%d): " | |
1329 | "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", | |
1330 | map, task, fsroot, cpu, is_64bit, | |
1331 | (long long)target_address, (long long)mapping_size, | |
1332 | sr_handle, kr)); | |
1333 | } | |
1334 | if (sr_size > 0) { | |
1335 | /* and there's some left to be mapped without pmap-nesting */ | |
1336 | target_address = sr_address + sr_offset; | |
1337 | mapping_size = sr_size; | |
1338 | kr = vm_map_enter_mem_object( | |
1339 | map, | |
1340 | &target_address, | |
1341 | mapping_size, | |
1342 | 0, | |
1343 | VM_FLAGS_FIXED, | |
1344 | sr_handle, | |
1345 | sr_offset, | |
1346 | TRUE, | |
1347 | VM_PROT_READ, | |
1348 | VM_PROT_ALL, | |
1349 | VM_INHERIT_SHARE); | |
1350 | if (kr != KERN_SUCCESS) { | |
1351 | SHARED_REGION_TRACE_ERROR( | |
1352 | ("shared_region: enter(%p,%p,%p,%d,%d): " | |
1353 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", | |
1354 | map, task, fsroot, cpu, is_64bit, | |
1355 | (long long)target_address, | |
1356 | (long long)mapping_size, sr_handle, kr)); | |
1357 | goto done; | |
1358 | } | |
1359 | SHARED_REGION_TRACE_DEBUG( | |
1360 | ("shared_region: enter(%p,%p,%p,%d,%d): " | |
1361 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", | |
1362 | map, task, fsroot, cpu, is_64bit, | |
1363 | (long long)target_address, (long long)mapping_size, | |
1364 | sr_handle, kr)); | |
1365 | sr_offset += mapping_size; | |
1366 | sr_size -= mapping_size; | |
1367 | } | |
1368 | assert(sr_size == 0); | |
1369 | ||
1370 | done: | |
1371 | SHARED_REGION_TRACE_DEBUG( | |
1372 | ("shared_region: enter(%p,%p,%p,%d,%d) <- 0x%x\n", | |
1373 | map, task, fsroot, cpu, is_64bit, kr)); | |
1374 | return kr; | |
1375 | } | |
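/*
 * Worked example of the chunking above, with made-up numbers: if the
 * pmap-nested range is 0x60000000 bytes and pmap_nesting_size_max is
 * 0x40000000, the loop issues two nested mappings of 0x40000000 and
 * 0x20000000 bytes; whatever part of the shared region lies outside the
 * nesting range is then mapped once more without pmap-nesting.
 */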
1376 | ||
6d2010ae A |
1377 | #define SANE_SLIDE_INFO_SIZE (1024*1024) /*Can be changed if needed*/ |
1378 | struct vm_shared_region_slide_info slide_info; | |
1379 | ||
1380 | kern_return_t | |
1381 | vm_shared_region_sliding_valid(uint32_t slide) { | |
1382 | ||
1383 | kern_return_t kr = KERN_SUCCESS; | |
1384 | ||
1385 | if ((shared_region_completed_slide == TRUE) && slide) { | |
1386 | if (slide != slide_info.slide) { | |
316670eb | 1387 | printf("Only one shared region can be slid\n"); |
6d2010ae A |
1388 | kr = KERN_FAILURE; |
1389 | } else if (slide == slide_info.slide) { | |
1390 | /* | |
1391 | * A request to slide with exactly | |
1392 | * the same slide value that was | |
1393 | * already applied earlier. | |
1394 | * Technically not an error, but we | |
1395 | * don't want to slide again, so | |
1396 | * return KERN_INVALID_ARGUMENT. | |
1397 | */ | |
1398 | kr = KERN_INVALID_ARGUMENT; | |
1399 | } | |
1400 | } | |
1401 | return kr; | |
1402 | } | |
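/*
 * Note the convention above: KERN_FAILURE means a conflicting slide was
 * requested, while KERN_INVALID_ARGUMENT means "already slid with this
 * exact value"; vm_shared_region_slide_init() below converts the latter
 * back into KERN_SUCCESS.
 */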
1403 | ||
1404 | kern_return_t | |
1405 | vm_shared_region_slide_init( | |
1406 | mach_vm_size_t slide_info_size, | |
1407 | mach_vm_offset_t start, | |
1408 | mach_vm_size_t size, | |
1409 | uint32_t slide, | |
1410 | memory_object_control_t sr_file_control) | |
1411 | { | |
1412 | kern_return_t kr = KERN_SUCCESS; | |
1413 | vm_object_t object = VM_OBJECT_NULL; | |
1414 | vm_object_offset_t offset = 0; | |
1415 | ||
1416 | vm_map_t map = NULL, cur_map = NULL; | |
1417 | boolean_t is_map_locked = FALSE; | |
1418 | ||
1419 | if ((kr = vm_shared_region_sliding_valid(slide)) != KERN_SUCCESS) { | |
1420 | if (kr == KERN_INVALID_ARGUMENT) { | |
1421 | /* | |
1422 | * This will happen if we request sliding again | |
1423 | * with the same slide value that was used earlier | |
1424 | * for the very first sliding. | |
1425 | */ | |
1426 | kr = KERN_SUCCESS; | |
1427 | } | |
1428 | return kr; | |
1429 | } | |
1430 | ||
1431 | if (slide_info_size > SANE_SLIDE_INFO_SIZE) { | |
316670eb | 1432 | printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size); |
6d2010ae A |
1433 | kr = KERN_FAILURE; |
1434 | return kr; | |
1435 | } | |
1436 | ||
1437 | if (sr_file_control != MEMORY_OBJECT_CONTROL_NULL) { | |
1438 | ||
1439 | object = memory_object_control_to_vm_object(sr_file_control); | |
1440 | vm_object_reference(object); | |
1441 | offset = start; | |
1442 | ||
1443 | vm_object_lock_shared(object); | |
1444 | ||
1445 | } else { | |
1446 | /* | |
1447 | * Remove this entire "else" block and all "map" references | |
1448 | * once we get rid of the shared_region_slide_np() | |
1449 | * system call. | |
1450 | */ | |
1451 | vm_map_entry_t entry = VM_MAP_ENTRY_NULL; | |
1452 | map = current_map(); | |
1453 | vm_map_lock_read(map); | |
1454 | is_map_locked = TRUE; | |
1455 | Retry: | |
1456 | cur_map = map; | |
1457 | if(!vm_map_lookup_entry(map, start, &entry)) { | |
1458 | kr = KERN_INVALID_ARGUMENT; | |
1459 | } else { | |
1460 | vm_object_t shadow_obj = VM_OBJECT_NULL; | |
1461 | ||
1462 | if (entry->is_sub_map == TRUE) { | |
1463 | map = entry->object.sub_map; | |
1464 | start -= entry->vme_start; | |
1465 | start += entry->offset; | |
1466 | vm_map_lock_read(map); | |
1467 | vm_map_unlock_read(cur_map); | |
1468 | goto Retry; | |
1469 | } else { | |
1470 | object = entry->object.vm_object; | |
1471 | offset = (start - entry->vme_start) + entry->offset; | |
1472 | } | |
1473 | ||
1474 | vm_object_lock_shared(object); | |
1475 | while (object->shadow != VM_OBJECT_NULL) { | |
1476 | shadow_obj = object->shadow; | |
1477 | vm_object_lock_shared(shadow_obj); | |
1478 | vm_object_unlock(object); | |
1479 | object = shadow_obj; | |
1480 | } | |
1481 | } | |
1482 | } | |
1483 | ||
1484 | if (object->internal == TRUE) { | |
1485 | kr = KERN_INVALID_ADDRESS; | |
1486 | } else { | |
1487 | kr = kmem_alloc(kernel_map, | |
1488 | (vm_offset_t *) &slide_info.slide_info_entry, | |
1489 | (vm_size_t) slide_info_size); | |
1490 | if (kr == KERN_SUCCESS) { | |
1491 | slide_info.slide_info_size = slide_info_size; | |
1492 | slide_info.slide_object = object; | |
1493 | slide_info.start = offset; | |
1494 | slide_info.end = slide_info.start + size; | |
1495 | slide_info.slide = slide; | |
1496 | slide_info.sr = vm_shared_region_get(current_task()); | |
1497 | /* | |
1498 | * We want to keep the above reference on the shared region | |
1499 | * because we have a pointer to it in the slide_info. | |
1500 | * | |
1501 | * If we want to have this region get deallocated/freed | |
1502 | * then we will have to make sure that we msync(..MS_INVALIDATE..) | |
1503 | * the pages associated with this shared region. Those pages would | |
1504 | * have been slid with an older slide value. | |
1505 | * | |
1506 | * vm_shared_region_deallocate(slide_info.sr); | |
1507 | */ | |
1508 | shared_region_completed_slide = TRUE; | |
1509 | } else { | |
1510 | kr = KERN_FAILURE; | |
1511 | } | |
1512 | } | |
1513 | vm_object_unlock(object); | |
1514 | ||
1515 | if (is_map_locked == TRUE) { | |
1516 | vm_map_unlock_read(map); | |
1517 | } | |
1518 | return kr; | |
1519 | } | |
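/*
 * Hypothetical usage sketch (an assumption, not taken from this file): a
 * caller that receives slide info from user space would be expected to drive
 * the routines above roughly as follows:
 *
 *	kr = vm_shared_region_slide_init(info_size, file_start, mapped_size,
 *					 slide, sr_file_control);
 *	if (kr == KERN_SUCCESS &&
 *	    vm_shared_region_get_slide_info_entry() != NULL) {
 *		// copy the user-supplied slide info into the kernel buffer
 *		// returned by vm_shared_region_get_slide_info_entry(), then:
 *		kr = vm_shared_region_slide_sanity_check();
 *	}
 *
 * All identifiers other than the three functions defined in this file are
 * illustrative placeholders.
 */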
1520 | ||
1521 | void* | |
1522 | vm_shared_region_get_slide_info(void) { | |
1523 | return (void*)&slide_info; | |
1524 | } | |
1525 | ||
1526 | void* | |
1527 | vm_shared_region_get_slide_info_entry(void) { | |
1528 | return (void*)slide_info.slide_info_entry; | |
1529 | } | |
1530 | ||
1531 | ||
1532 | kern_return_t | |
1533 | vm_shared_region_slide_sanity_check(void) | |
1534 | { | |
1535 | uint32_t pageIndex=0; | |
1536 | uint16_t entryIndex=0; | |
1537 | uint16_t *toc = NULL; | |
1538 | vm_shared_region_slide_info_entry_t s_info; | |
1539 | kern_return_t kr; | |
1540 | ||
1541 | s_info = vm_shared_region_get_slide_info_entry(); | |
1542 | toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset); | |
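/*
 * Descriptive note (added commentary): the slide info blob is laid out as a
 * header followed by a table of contents of "toc_count" 16-bit entries at
 * "toc_offset"; each TOC entry is an index into the "entry_count" per-page
 * sliding bitmaps that live at "entry_offset". The loop below only verifies
 * that every TOC entry stays within bounds.
 */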
1543 | ||
1544 | kr = mach_vm_protect(kernel_map, | |
1545 | (mach_vm_offset_t)(vm_offset_t) slide_info.slide_info_entry, | |
1546 | (mach_vm_size_t) slide_info.slide_info_size, | |
1547 | VM_PROT_READ, TRUE); | |
1548 | if (kr != KERN_SUCCESS) { | |
1549 | panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr); | |
1550 | } | |
1551 | ||
1552 | for (; pageIndex < s_info->toc_count; pageIndex++) { | |
1553 | ||
1554 | entryIndex = (uint16_t)(toc[pageIndex]); | |
1555 | ||
1556 | if (entryIndex >= s_info->entry_count) { | |
1557 | printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count); | |
1558 | goto fail; | |
1559 | } | |
1560 | ||
1561 | } | |
1562 | return KERN_SUCCESS; | |
1563 | fail: | |
1564 | if (slide_info.slide_info_entry != NULL) { | |
1565 | kmem_free(kernel_map, | |
1566 | (vm_offset_t) slide_info.slide_info_entry, | |
1567 | (vm_size_t) slide_info.slide_info_size); | |
1568 | vm_object_deallocate(slide_info.slide_object); | |
1569 | slide_info.slide_object = NULL; | |
1570 | slide_info.start = 0; | |
1571 | slide_info.end = 0; | |
1572 | slide_info.slide = 0; | |
1573 | slide_info.slide_info_entry = NULL; | |
1574 | slide_info.slide_info_size = 0; | |
1575 | shared_region_completed_slide = FALSE; | |
1576 | } | |
1577 | return KERN_FAILURE; | |
1578 | } | |
1579 | ||
1580 | kern_return_t | |
1581 | vm_shared_region_slide(vm_offset_t vaddr, uint32_t pageIndex) | |
1582 | { | |
1583 | uint16_t *toc = NULL; | |
1584 | slide_info_entry_toc_t bitmap = NULL; | |
1585 | uint32_t i=0, j=0; | |
1586 | uint8_t b = 0; | |
1587 | uint32_t slide = slide_info.slide; | |
1588 | int is_64 = task_has_64BitAddr(current_task()); | |
1589 | ||
1590 | vm_shared_region_slide_info_entry_t s_info = vm_shared_region_get_slide_info_entry(); | |
1591 | toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset); | |
1592 | ||
1593 | if (pageIndex >= s_info->toc_count) { | |
1594 | printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count); | |
1595 | } else { | |
1596 | uint16_t entryIndex = (uint16_t)(toc[pageIndex]); | |
1597 | slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset); | |
1598 | ||
1599 | if (entryIndex >= s_info->entry_count) { | |
1600 | printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count); | |
1601 | } else { | |
1602 | bitmap = &slide_info_entries[entryIndex]; | |
1603 | ||
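/*
 * Descriptive note (added commentary): each bitmap covers one page; bit j of
 * byte i marks the (i*8 + j)-th 32-bit word of that page as holding a pointer
 * that must be rebased, so the loops below add "slide" to exactly those words.
 */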
1604 | for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) { | |
1605 | b = bitmap->entry[i]; | |
1606 | if (b != 0) { | |
1607 | for (j = 0; j < 8; ++j) { | |
1608 | if (b & (1 << j)) { | |
1609 | uint32_t *ptr_to_slide; | |
1610 | uint32_t old_value; | |
1611 | ||
1612 | ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr) + (sizeof(uint32_t) * (i * 8 + j))); | |
1613 | old_value = *ptr_to_slide; | |
1614 | *ptr_to_slide += slide; | |
1615 | if (is_64 && *ptr_to_slide < old_value) { | |
1616 | /* | |
1617 | * We just slid the low 32 bits of a 64-bit pointer | |
1618 | * and it looks like there should have been a carry-over | |
1619 | * to the upper 32 bits. | |
1620 | * The sliding failed... | |
1621 | */ | |
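/*
 * Added worked example (illustrative values, not from the original source):
 * with slide = 0x00004000 and an old low word of 0xffffe000, the slid low
 * word wraps to 0x00002000 < 0xffffe000, i.e. the carry that should have
 * propagated into the upper 32 bits of the 64-bit pointer was lost, which is
 * exactly what the check above detects for 64-bit tasks.
 */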
316670eb A |
1622 | printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n", |
1623 | i, j, b, slide, old_value, *ptr_to_slide); | |
6d2010ae A |
1624 | return KERN_FAILURE; |
1625 | } | |
1626 | } | |
1627 | } | |
1628 | } | |
1629 | } | |
1630 | } | |
1631 | } | |
1632 | ||
1633 | return KERN_SUCCESS; | |
1634 | } | |
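/*
 * Hypothetical sketch (an assumption, not part of the original source): a
 * caller that has a to-be-slid page mapped at kernel address "kvaddr" and
 * knows the page's offset within the backing object could derive the TOC page
 * index from that offset relative to slide_info.start, roughly as follows.
 */
#if 0	/* illustrative only */
static kern_return_t
vm_shared_region_slide_page_sketch(
	vm_offset_t		kvaddr,		/* kernel mapping of the page */
	vm_object_offset_t	obj_offset)	/* page's offset in the object */
{
	if (obj_offset < slide_info.start || obj_offset >= slide_info.end) {
		/* page is outside the slid range: nothing to do */
		return KERN_SUCCESS;
	}
	return vm_shared_region_slide(kvaddr,
	    (uint32_t)((obj_offset - slide_info.start) >> PAGE_SHIFT));
}
#endif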
1635 | ||
2d21ac55 A |
1636 | /******************************************************************************/ |
1637 | /* Comm page support */ | |
1638 | /******************************************************************************/ | |
1639 | ||
1640 | ipc_port_t commpage32_handle = IPC_PORT_NULL; | |
1641 | ipc_port_t commpage64_handle = IPC_PORT_NULL; | |
1642 | vm_named_entry_t commpage32_entry = NULL; | |
1643 | vm_named_entry_t commpage64_entry = NULL; | |
1644 | vm_map_t commpage32_map = VM_MAP_NULL; | |
1645 | vm_map_t commpage64_map = VM_MAP_NULL; | |
1646 | ||
316670eb A |
1647 | ipc_port_t commpage_text32_handle = IPC_PORT_NULL; |
1648 | ipc_port_t commpage_text64_handle = IPC_PORT_NULL; | |
1649 | vm_named_entry_t commpage_text32_entry = NULL; | |
1650 | vm_named_entry_t commpage_text64_entry = NULL; | |
1651 | vm_map_t commpage_text32_map = VM_MAP_NULL; | |
1652 | vm_map_t commpage_text64_map = VM_MAP_NULL; | |
1653 | ||
1654 | user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START; | |
1655 | user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START; | |
1656 | ||
1657 | #if defined(__i386__) || defined(__x86_64__) | |
2d21ac55 A |
1658 | /* |
1659 | * Create a memory entry, VM submap and pmap for one commpage. | |
1660 | */ | |
1661 | static void | |
1662 | _vm_commpage_init( | |
1663 | ipc_port_t *handlep, | |
1664 | vm_map_size_t size) | |
1665 | { | |
1666 | kern_return_t kr; | |
1667 | vm_named_entry_t mem_entry; | |
1668 | vm_map_t new_map; | |
1669 | ||
1670 | SHARED_REGION_TRACE_DEBUG( | |
1671 | ("commpage: -> _init(0x%llx)\n", | |
1672 | (long long)size)); | |
1673 | ||
1674 | kr = mach_memory_entry_allocate(&mem_entry, | |
1675 | handlep); | |
1676 | if (kr != KERN_SUCCESS) { | |
1677 | panic("_vm_commpage_init: could not allocate mem_entry"); | |
1678 | } | |
316670eb | 1679 | new_map = vm_map_create(pmap_create(NULL, 0, FALSE), 0, size, TRUE); |
2d21ac55 A |
1680 | if (new_map == VM_MAP_NULL) { |
1681 | panic("_vm_commpage_init: could not allocate VM map"); | |
1682 | } | |
1683 | mem_entry->backing.map = new_map; | |
1684 | mem_entry->internal = TRUE; | |
1685 | mem_entry->is_sub_map = TRUE; | |
1686 | mem_entry->offset = 0; | |
1687 | mem_entry->protection = VM_PROT_ALL; | |
1688 | mem_entry->size = size; | |
1689 | ||
1690 | SHARED_REGION_TRACE_DEBUG( | |
1691 | ("commpage: _init(0x%llx) <- %p\n", | |
1692 | (long long)size, *handlep)); | |
1693 | } | |
316670eb A |
1694 | #endif |
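/*
 * Descriptive note (added commentary): after _vm_commpage_init() returns, the
 * port in *handlep names a vm_named_entry whose backing.map is a fresh submap
 * with its own pmap. The init routines below pull the named entry and submap
 * back out through ip_kobject and backing.map, and the handle is later passed
 * to vm_map_enter_mem_object() to map the commpage into each task.
 */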
1695 | ||
1696 | ||
1697 | /* | |
1698 | * Initialize the comm text pages at boot time. | |
1699 | */ | |
1700 | extern u_int32_t random(void); | |
1701 | void | |
1702 | vm_commpage_text_init(void) | |
1703 | { | |
1704 | SHARED_REGION_TRACE_DEBUG( | |
1705 | ("commpage text: ->init()\n")); | |
1706 | #if defined(__i386__) || defined(__x86_64__) | |
1707 | /* create the 32-bit comm text page */ | |
1708 | unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restricting to 32-bit max minus 2 pages */ | |
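/*
 * Added worked note (illustrative): random() % _PFZ32_SLIDE_RANGE picks a
 * page count within the slide range and << PAGE_SHIFT turns it into a byte
 * offset; e.g. with 4 KB pages (PAGE_SHIFT == 12) a value of 3 places the
 * text page at _COMM_PAGE32_TEXT_START + 0x3000.
 */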
1709 | _vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH); | |
1710 | commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject; | |
1711 | commpage_text32_map = commpage_text32_entry->backing.map; | |
1712 | commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset); | |
1713 | /* XXX if (cpu_is_64bit_capable()) ? */ | |
1714 | /* create the 64-bit comm page */ | |
1715 | offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding to a range of up to 2MB */ | |
1716 | _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH); | |
1717 | commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject; | |
1718 | commpage_text64_map = commpage_text64_entry->backing.map; | |
1719 | commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset); | |
1720 | ||
1721 | commpage_text_populate(); | |
1722 | #else | |
1723 | #error Unknown architecture. | |
1724 | #endif /* __i386__ || __x86_64__ */ | |
1725 | /* populate the routines in here */ | |
1726 | SHARED_REGION_TRACE_DEBUG( | |
1727 | ("commpage text: init() <-\n")); | |
1728 | ||
1729 | } | |
2d21ac55 A |
1730 | |
1731 | /* | |
1732 | * Initialize the comm pages at boot time. | |
1733 | */ | |
1734 | void | |
1735 | vm_commpage_init(void) | |
1736 | { | |
1737 | SHARED_REGION_TRACE_DEBUG( | |
1738 | ("commpage: -> init()\n")); | |
1739 | ||
316670eb | 1740 | #if defined(__i386__) || defined(__x86_64__) |
2d21ac55 A |
1741 | /* create the 32-bit comm page */ |
1742 | _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH); | |
1743 | commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject; | |
1744 | commpage32_map = commpage32_entry->backing.map; | |
1745 | ||
1746 | /* XXX if (cpu_is_64bit_capable()) ? */ | |
1747 | /* create the 64-bit comm page */ | |
1748 | _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH); | |
1749 | commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject; | |
1750 | commpage64_map = commpage64_entry->backing.map; | |
1751 | ||
316670eb A |
1752 | #endif /* __i386__ || __x86_64__ */ |
1753 | ||
2d21ac55 A |
1754 | /* populate them according to this specific platform */ |
1755 | commpage_populate(); | |
b0d623f7 A |
1756 | __commpage_setup = 1; |
1757 | #if defined(__i386__) || defined(__x86_64__) | |
1758 | if (__system_power_source == 0) { | |
1759 | post_sys_powersource_internal(0, 1); | |
1760 | } | |
1761 | #endif /* __i386__ || __x86_64__ */ | |
2d21ac55 A |
1762 | |
1763 | SHARED_REGION_TRACE_DEBUG( | |
1764 | ("commpage: init() <-\n")); | |
1765 | } | |
1766 | ||
1767 | /* | |
1768 | * Enter the appropriate comm page into the task's address space. | |
1769 | * This is called at exec() time via vm_map_exec(). | |
1770 | */ | |
1771 | kern_return_t | |
1772 | vm_commpage_enter( | |
1773 | vm_map_t map, | |
1774 | task_t task) | |
1775 | { | |
316670eb A |
1776 | ipc_port_t commpage_handle, commpage_text_handle; |
1777 | vm_map_offset_t commpage_address, objc_address, commpage_text_address; | |
1778 | vm_map_size_t commpage_size, objc_size, commpage_text_size; | |
2d21ac55 A |
1779 | int vm_flags; |
1780 | kern_return_t kr; | |
1781 | ||
1782 | SHARED_REGION_TRACE_DEBUG( | |
1783 | ("commpage: -> enter(%p,%p)\n", | |
1784 | map, task)); | |
1785 | ||
316670eb | 1786 | commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH; |
2d21ac55 A |
1787 | /* the comm page is likely to be beyond the actual end of the VM map */ |
1788 | vm_flags = VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX; | |
1789 | ||
1790 | /* select the appropriate comm page for this task */ | |
1791 | assert(! (task_has_64BitAddr(task) ^ vm_map_is_64bit(map))); | |
1792 | if (task_has_64BitAddr(task)) { | |
2d21ac55 A |
1793 | commpage_handle = commpage64_handle; |
1794 | commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS; | |
1795 | commpage_size = _COMM_PAGE64_AREA_LENGTH; | |
1796 | objc_size = _COMM_PAGE64_OBJC_SIZE; | |
1797 | objc_address = _COMM_PAGE64_OBJC_BASE; | |
316670eb A |
1798 | commpage_text_handle = commpage_text64_handle; |
1799 | commpage_text_address = (vm_map_offset_t) commpage_text64_location; | |
2d21ac55 A |
1800 | } else { |
1801 | commpage_handle = commpage32_handle; | |
1802 | commpage_address = | |
1803 | (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS; | |
1804 | commpage_size = _COMM_PAGE32_AREA_LENGTH; | |
1805 | objc_size = _COMM_PAGE32_OBJC_SIZE; | |
1806 | objc_address = _COMM_PAGE32_OBJC_BASE; | |
316670eb A |
1807 | commpage_text_handle = commpage_text32_handle; |
1808 | commpage_text_address = (vm_map_offset_t) commpage_text32_location; | |
2d21ac55 A |
1809 | } |
1810 | ||
1811 | if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 && | |
1812 | (commpage_size & (pmap_nesting_size_min - 1)) == 0) { | |
1813 | /* the commpage is properly aligned and sized for pmap-nesting */ | |
1814 | vm_flags |= VM_MAKE_TAG(VM_MEMORY_SHARED_PMAP); | |
1815 | } | |
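/*
 * Descriptive note (added commentary): assuming pmap_nesting_size_min is a
 * power of two (which the mask arithmetic above relies on),
 * "x & (pmap_nesting_size_min - 1)" isolates the low-order bits, so the test
 * simply checks that both the commpage address and size are exact multiples
 * of the minimum nesting granularity before tagging the mapping as shareable
 * at the pmap level.
 */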
2d21ac55 A |
1816 | /* map the comm page in the task's address space */ |
1817 | assert(commpage_handle != IPC_PORT_NULL); | |
1818 | kr = vm_map_enter_mem_object( | |
1819 | map, | |
1820 | &commpage_address, | |
1821 | commpage_size, | |
1822 | 0, | |
1823 | vm_flags, | |
1824 | commpage_handle, | |
1825 | 0, | |
1826 | FALSE, | |
316670eb A |
1827 | VM_PROT_READ, |
1828 | VM_PROT_READ, | |
2d21ac55 A |
1829 | VM_INHERIT_SHARE); |
1830 | if (kr != KERN_SUCCESS) { | |
1831 | SHARED_REGION_TRACE_ERROR( | |
1832 | ("commpage: enter(%p,0x%llx,0x%llx) " | |
1833 | "commpage %p mapping failed 0x%x\n", | |
1834 | map, (long long)commpage_address, | |
1835 | (long long)commpage_size, commpage_handle, kr)); | |
1836 | } | |
1837 | ||
316670eb A |
1838 | /* map the comm text page in the task's address space */ |
1839 | assert(commpage_text_handle != IPC_PORT_NULL); | |
1840 | kr = vm_map_enter_mem_object( | |
1841 | map, | |
1842 | &commpage_text_address, | |
1843 | commpage_text_size, | |
1844 | 0, | |
1845 | vm_flags, | |
1846 | commpage_text_handle, | |
1847 | 0, | |
1848 | FALSE, | |
1849 | VM_PROT_READ|VM_PROT_EXECUTE, | |
1850 | VM_PROT_READ|VM_PROT_EXECUTE, | |
1851 | VM_INHERIT_SHARE); | |
1852 | if (kr != KERN_SUCCESS) { | |
1853 | SHARED_REGION_TRACE_ERROR( | |
1854 | ("commpage text: enter(%p,0x%llx,0x%llx) " | |
1855 | "commpage text %p mapping failed 0x%x\n", | |
1856 | map, (long long)commpage_text_address, | |
1857 | (long long)commpage_text_size, commpage_text_handle, kr)); | |
1858 | } | |
1859 | ||
2d21ac55 A |
1860 | /* |
1861 | * Since we're here, we also pre-allocate some virtual space for the | |
1862 | * Objective-C run-time, if needed... | |
1863 | */ | |
1864 | if (objc_size != 0) { | |
1865 | kr = vm_map_enter_mem_object( | |
1866 | map, | |
1867 | &objc_address, | |
1868 | objc_size, | |
1869 | 0, | |
1870 | VM_FLAGS_FIXED | VM_FLAGS_BEYOND_MAX, | |
1871 | IPC_PORT_NULL, | |
1872 | 0, | |
1873 | FALSE, | |
1874 | VM_PROT_ALL, | |
1875 | VM_PROT_ALL, | |
1876 | VM_INHERIT_DEFAULT); | |
1877 | if (kr != KERN_SUCCESS) { | |
1878 | SHARED_REGION_TRACE_ERROR( | |
1879 | ("commpage: enter(%p,0x%llx,0x%llx) " | |
1880 | "objc mapping failed 0x%x\n", | |
1881 | map, (long long)objc_address, | |
1882 | (long long)objc_size, kr)); | |
1883 | } | |
1884 | } | |
1885 | ||
1886 | SHARED_REGION_TRACE_DEBUG( | |
1887 | ("commpage: enter(%p,%p) <- 0x%x\n", | |
1888 | map, task, kr)); | |
1889 | return kr; | |
1890 | } | |
b0d623f7 A |
1891 | |
1892 | ||
1893 | /* | |
1894 | * This is called from power management code to let the kernel know the current source of power: | |
1895 | * 0 if it is an external source (connected to power), | |
1896 | * 1 if it is an internal power source, i.e. battery. | |
1897 | */ | |
1898 | void | |
1899 | #if defined(__i386__) || defined(__x86_64__) | |
1900 | post_sys_powersource(int i) | |
1901 | #else | |
1902 | post_sys_powersource(__unused int i) | |
1903 | #endif | |
1904 | { | |
1905 | #if defined(__i386__) || defined(__x86_64__) | |
1906 | post_sys_powersource_internal(i, 0); | |
1907 | #endif /* __i386__ || __x86_64__ */ | |
1908 | } | |
1909 | ||
1910 | ||
1911 | #if defined(__i386__) || defined(__x86_64__) | |
1912 | static void | |
1913 | post_sys_powersource_internal(int i, int internal) | |
1914 | { | |
1915 | if (internal == 0) | |
1916 | __system_power_source = i; | |
1917 | ||
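/*
 * Descriptive note (added commentary): when running on battery
 * (__system_power_source != 0) the commpage spin count is set to 0,
 * presumably so user space stops spinning on contended locks and drops into
 * the kernel sooner to save power; on external power it is restored to
 * MP_SPIN_TRIES.
 */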
1918 | if (__commpage_setup != 0) { | |
1919 | if (__system_power_source != 0) | |
1920 | commpage_set_spin_count(0); | |
1921 | else | |
1922 | commpage_set_spin_count(MP_SPIN_TRIES); | |
1923 | } | |
1924 | } | |
1925 | #endif /* __i386__ || __x86_64__ */ |