1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: vm/vm_map.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
55 * Date: 1985
56 *
57 * Virtual memory mapping module.
58 */
59
60#include <cpus.h>
61#include <task_swapper.h>
62#include <mach_assert.h>
63
64#include <mach/kern_return.h>
65#include <mach/port.h>
66#include <mach/vm_attributes.h>
67#include <mach/vm_param.h>
68#include <mach/vm_behavior.h>
69#include <kern/assert.h>
70#include <kern/counters.h>
71#include <kern/zalloc.h>
72#include <vm/vm_init.h>
73#include <vm/vm_fault.h>
74#include <vm/vm_map.h>
75#include <vm/vm_object.h>
76#include <vm/vm_page.h>
77#include <vm/vm_kern.h>
78#include <ipc/ipc_port.h>
79#include <kern/sched_prim.h>
80#include <kern/misc_protos.h>
81#include <mach/vm_map_server.h>
82#include <mach/mach_host_server.h>
83#include <ddb/tr.h>
84#include <kern/xpr.h>
85
86/* Internal prototypes
87 */
88extern boolean_t vm_map_range_check(
89 vm_map_t map,
90 vm_offset_t start,
91 vm_offset_t end,
92 vm_map_entry_t *entry);
93
94extern vm_map_entry_t _vm_map_entry_create(
95 struct vm_map_header *map_header);
96
97extern void _vm_map_entry_dispose(
98 struct vm_map_header *map_header,
99 vm_map_entry_t entry);
100
101extern void vm_map_pmap_enter(
102 vm_map_t map,
103 vm_offset_t addr,
104 vm_offset_t end_addr,
105 vm_object_t object,
106 vm_object_offset_t offset,
107 vm_prot_t protection);
108
109extern void _vm_map_clip_end(
110 struct vm_map_header *map_header,
111 vm_map_entry_t entry,
112 vm_offset_t end);
113
114extern void vm_map_entry_delete(
115 vm_map_t map,
116 vm_map_entry_t entry);
117
118extern kern_return_t vm_map_delete(
119 vm_map_t map,
120 vm_offset_t start,
121 vm_offset_t end,
122 int flags);
123
124extern void vm_map_copy_steal_pages(
125 vm_map_copy_t copy);
126
127extern kern_return_t vm_map_copy_overwrite_unaligned(
128 vm_map_t dst_map,
129 vm_map_entry_t entry,
130 vm_map_copy_t copy,
131 vm_offset_t start);
132
133extern kern_return_t vm_map_copy_overwrite_aligned(
134 vm_map_t dst_map,
135 vm_map_entry_t tmp_entry,
136 vm_map_copy_t copy,
137 vm_offset_t start,
138 pmap_t pmap);
139
140extern kern_return_t vm_map_copyin_kernel_buffer(
141 vm_map_t src_map,
142 vm_offset_t src_addr,
143 vm_size_t len,
144 boolean_t src_destroy,
145 vm_map_copy_t *copy_result); /* OUT */
146
147extern kern_return_t vm_map_copyout_kernel_buffer(
148 vm_map_t map,
149 vm_offset_t *addr, /* IN/OUT */
150 vm_map_copy_t copy,
151 boolean_t overwrite);
152
153extern void vm_map_fork_share(
154 vm_map_t old_map,
155 vm_map_entry_t old_entry,
156 vm_map_t new_map);
157
158extern boolean_t vm_map_fork_copy(
159 vm_map_t old_map,
160 vm_map_entry_t *old_entry_p,
161 vm_map_t new_map);
162
163extern kern_return_t vm_remap_range_allocate(
164 vm_map_t map,
165 vm_offset_t *address, /* IN/OUT */
166 vm_size_t size,
167 vm_offset_t mask,
168 boolean_t anywhere,
169 vm_map_entry_t *map_entry); /* OUT */
170
171extern void _vm_map_clip_start(
172 struct vm_map_header *map_header,
173 vm_map_entry_t entry,
174 vm_offset_t start);
175
176void vm_region_top_walk(
177 vm_map_entry_t entry,
178 vm_region_top_info_t top);
179
180void vm_region_walk(
181 vm_map_entry_t entry,
182 vm_region_extended_info_t extended,
183 vm_object_offset_t offset,
184 vm_offset_t range,
185 vm_map_t map,
186 vm_offset_t va);
187
188/*
189 * Macros to copy a vm_map_entry. We must be careful to correctly
190 * manage the wired page count. vm_map_entry_copy() creates a new
191 * map entry to the same memory - the wired count in the new entry
192 * must be set to zero. vm_map_entry_copy_full() creates a new
193 * entry that is identical to the old entry. This preserves the
194 * wire count; it's used for map splitting and zone changing in
195 * vm_map_copyout.
196 */
197#define vm_map_entry_copy(NEW,OLD) \
198MACRO_BEGIN \
199 *(NEW) = *(OLD); \
200 (NEW)->is_shared = FALSE; \
201 (NEW)->needs_wakeup = FALSE; \
202 (NEW)->in_transition = FALSE; \
203 (NEW)->wired_count = 0; \
204 (NEW)->user_wired_count = 0; \
205MACRO_END
206
207#define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD))
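/*
 *	Illustrative example (not part of the original file; "src" and "dst"
 *	are assumed to be valid vm_map_entry_t values): the two macros differ
 *	only in how the wiring counts come across:
 *
 *		vm_map_entry_copy(dst, src);	   - dst->wired_count == 0
 *		vm_map_entry_copy_full(dst, src);  - dst->wired_count == src->wired_count
 */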
208
209/*
210 * Virtual memory maps provide for the mapping, protection,
211 * and sharing of virtual memory objects. In addition,
212 * this module provides for an efficient virtual copy of
213 * memory from one map to another.
214 *
215 * Synchronization is required prior to most operations.
216 *
217 * Maps consist of an ordered doubly-linked list of simple
218 * entries; a single hint is used to speed up lookups.
219 *
220 * Sharing maps have been deleted from this version of Mach.
221 * All shared objects are now mapped directly into the respective
222 * maps. This requires a change in the copy on write strategy;
223 * the asymmetric (delayed) strategy is used for shared temporary
224 * objects instead of the symmetric (shadow) strategy. All maps
225 * are now "top level" maps (either task map, kernel map or submap
226 * of the kernel map).
227 *
 228 * Since portions of maps are specified by start/end addresses,
229 * which may not align with existing map entries, all
230 * routines merely "clip" entries to these start/end values.
231 * [That is, an entry is split into two, bordering at a
232 * start or end value.] Note that these clippings may not
233 * always be necessary (as the two resulting entries are then
234 * not changed); however, the clipping is done for convenience.
235 * No attempt is currently made to "glue back together" two
236 * abutting entries.
237 *
238 * The symmetric (shadow) copy strategy implements virtual copy
239 * by copying VM object references from one map to
240 * another, and then marking both regions as copy-on-write.
241 * It is important to note that only one writeable reference
242 * to a VM object region exists in any map when this strategy
243 * is used -- this means that shadow object creation can be
 244 * delayed until a write operation occurs. The asymmetric (delayed)
245 * strategy allows multiple maps to have writeable references to
246 * the same region of a vm object, and hence cannot delay creating
247 * its copy objects. See vm_object_copy_quickly() in vm_object.c.
248 * Copying of permanent objects is completely different; see
249 * vm_object_copy_strategically() in vm_object.c.
250 */
251
252zone_t vm_map_zone; /* zone for vm_map structures */
253zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */
254zone_t vm_map_kentry_zone; /* zone for kernel entry structures */
255zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */
256
257
258/*
259 * Placeholder object for submap operations. This object is dropped
260 * into the range by a call to vm_map_find, and removed when
261 * vm_map_submap creates the submap.
262 */
263
264vm_object_t vm_submap_object;
265
266/*
267 * vm_map_init:
268 *
269 * Initialize the vm_map module. Must be called before
270 * any other vm_map routines.
271 *
272 * Map and entry structures are allocated from zones -- we must
273 * initialize those zones.
274 *
275 * There are three zones of interest:
276 *
277 * vm_map_zone: used to allocate maps.
278 * vm_map_entry_zone: used to allocate map entries.
279 * vm_map_kentry_zone: used to allocate map entries for the kernel.
280 *
281 * The kernel allocates map entries from a special zone that is initially
282 * "crammed" with memory. It would be difficult (perhaps impossible) for
 283 * the kernel to allocate more memory to an entry zone when it became
284 * empty since the very act of allocating memory implies the creation
285 * of a new entry.
286 */
287
288vm_offset_t map_data;
289vm_size_t map_data_size;
290vm_offset_t kentry_data;
291vm_size_t kentry_data_size;
292int kentry_count = 2048; /* to init kentry_data_size */
293
294#define NO_COALESCE_LIMIT (1024 * 128)
295
296/*
297 * Threshold for aggressive (eager) page map entering for vm copyout
 298 * operations. Any copyout larger than this will NOT be aggressively entered.
299 */
300vm_size_t vm_map_aggressive_enter_max; /* set by bootstrap */
301
302void
303vm_map_init(
304 void)
305{
306 vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024,
307 PAGE_SIZE, "maps");
308
309 vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
310 1024*1024, PAGE_SIZE*5,
311 "non-kernel map entries");
312
313 vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry),
314 kentry_data_size, kentry_data_size,
315 "kernel map entries");
316
317 vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy),
318 16*1024, PAGE_SIZE, "map copies");
319
320 /*
321 * Cram the map and kentry zones with initial data.
322 * Set kentry_zone non-collectible to aid zone_gc().
323 */
324 zone_change(vm_map_zone, Z_COLLECT, FALSE);
325 zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
326 zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
327 zcram(vm_map_zone, map_data, map_data_size);
328 zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
329}
330
331void
332vm_map_steal_memory(
333 void)
334{
335 map_data_size = round_page(10 * sizeof(struct vm_map));
336 map_data = pmap_steal_memory(map_data_size);
337
338#if 0
339 /*
340 * Limiting worst case: vm_map_kentry_zone needs to map each "available"
341 * physical page (i.e. that beyond the kernel image and page tables)
342 * individually; we guess at most one entry per eight pages in the
343 * real world. This works out to roughly .1 of 1% of physical memory,
344 * or roughly 1900 entries (64K) for a 64M machine with 4K pages.
345 */
346#endif
347 kentry_count = pmap_free_pages() / 8;
348
349
350 kentry_data_size =
351 round_page(kentry_count * sizeof(struct vm_map_entry));
352 kentry_data = pmap_steal_memory(kentry_data_size);
353}
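/*
 *	Bootstrap ordering sketch (illustrative only; the exact caller is an
 *	assumption): vm_map_steal_memory() must run before vm_map_init(), so
 *	that map_data and kentry_data exist for vm_map_init() to zcram() into
 *	the non-expandable map and kernel-entry zones:
 *
 *		vm_map_steal_memory();	- grab map_data / kentry_data early
 *		...			- remaining pmap/zone bootstrap
 *		vm_map_init();		- create the zones, cram the stolen memory
 */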
354
355/*
356 * vm_map_create:
357 *
358 * Creates and returns a new empty VM map with
359 * the given physical map structure, and having
360 * the given lower and upper address bounds.
361 */
362vm_map_t
363vm_map_create(
364 pmap_t pmap,
365 vm_offset_t min,
366 vm_offset_t max,
367 boolean_t pageable)
368{
369 register vm_map_t result;
370
371 result = (vm_map_t) zalloc(vm_map_zone);
372 if (result == VM_MAP_NULL)
373 panic("vm_map_create");
374
375 vm_map_first_entry(result) = vm_map_to_entry(result);
376 vm_map_last_entry(result) = vm_map_to_entry(result);
377 result->hdr.nentries = 0;
378 result->hdr.entries_pageable = pageable;
379
380 result->size = 0;
381 result->ref_count = 1;
382#if TASK_SWAPPER
383 result->res_count = 1;
384 result->sw_state = MAP_SW_IN;
385#endif /* TASK_SWAPPER */
386 result->pmap = pmap;
387 result->min_offset = min;
388 result->max_offset = max;
389 result->wiring_required = FALSE;
390 result->no_zero_fill = FALSE;
391 result->wait_for_space = FALSE;
392 result->first_free = vm_map_to_entry(result);
393 result->hint = vm_map_to_entry(result);
394 vm_map_lock_init(result);
395 mutex_init(&result->s_lock, ETAP_VM_RESULT);
396
397 return(result);
398}
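#if 0
/*
 * Illustrative sketch (not from the original source): creating a pageable
 * task-style map.  The pmap_create() argument and the VM_MIN_ADDRESS /
 * VM_MAX_ADDRESS bounds are assumptions.
 */
static vm_map_t
example_map_create(void)
{
	/*
	 * vm_map_create() returns the map with ref_count == 1; the matching
	 * release is vm_map_deallocate().
	 */
	return vm_map_create(pmap_create((vm_size_t) 0),
			     trunc_page(VM_MIN_ADDRESS),
			     round_page(VM_MAX_ADDRESS),
			     TRUE);		/* entries allocated pageable */
}
#endif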
399
400/*
401 * vm_map_entry_create: [ internal use only ]
402 *
403 * Allocates a VM map entry for insertion in the
404 * given map (or map copy). No fields are filled.
405 */
406#define vm_map_entry_create(map) \
407 _vm_map_entry_create(&(map)->hdr)
408
409#define vm_map_copy_entry_create(copy) \
410 _vm_map_entry_create(&(copy)->cpy_hdr)
411
412vm_map_entry_t
413_vm_map_entry_create(
414 register struct vm_map_header *map_header)
415{
416 register zone_t zone;
417 register vm_map_entry_t entry;
418
419 if (map_header->entries_pageable)
420 zone = vm_map_entry_zone;
421 else
422 zone = vm_map_kentry_zone;
423
424 entry = (vm_map_entry_t) zalloc(zone);
425 if (entry == VM_MAP_ENTRY_NULL)
426 panic("vm_map_entry_create");
427
428 return(entry);
429}
430
431/*
432 * vm_map_entry_dispose: [ internal use only ]
433 *
434 * Inverse of vm_map_entry_create.
435 */
436#define vm_map_entry_dispose(map, entry) \
437MACRO_BEGIN \
438 if((entry) == (map)->first_free) \
439 (map)->first_free = vm_map_to_entry(map); \
440 if((entry) == (map)->hint) \
441 (map)->hint = vm_map_to_entry(map); \
442 _vm_map_entry_dispose(&(map)->hdr, (entry)); \
443MACRO_END
444
 445#define vm_map_copy_entry_dispose(copy, entry) \
446 _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
447
448void
449_vm_map_entry_dispose(
450 register struct vm_map_header *map_header,
451 register vm_map_entry_t entry)
452{
453 register zone_t zone;
454
455 if (map_header->entries_pageable)
456 zone = vm_map_entry_zone;
457 else
458 zone = vm_map_kentry_zone;
459
460 zfree(zone, (vm_offset_t) entry);
461}
462
463boolean_t first_free_is_valid(vm_map_t map); /* forward */
464boolean_t first_free_check = FALSE;
465boolean_t
466first_free_is_valid(
467 vm_map_t map)
468{
469 vm_map_entry_t entry, next;
470
471 if (!first_free_check)
472 return TRUE;
473
474 entry = vm_map_to_entry(map);
475 next = entry->vme_next;
476 while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
477 (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
478 next != vm_map_to_entry(map))) {
479 entry = next;
480 next = entry->vme_next;
481 if (entry == vm_map_to_entry(map))
482 break;
483 }
484 if (map->first_free != entry) {
485 printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n",
486 map, map->first_free, entry);
487 return FALSE;
488 }
489 return TRUE;
490}
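/*
 *	Debugging note (illustrative, not from the original source):
 *	first_free_check defaults to FALSE, so the scan above is normally
 *	skipped; setting it (e.g. from a kernel debugger) makes every
 *	UPDATE_FIRST_FREE() below verify map->first_free:
 *
 *		first_free_check = TRUE;
 */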
491
492/*
493 * UPDATE_FIRST_FREE:
494 *
495 * Updates the map->first_free pointer to the
496 * entry immediately before the first hole in the map.
497 * The map should be locked.
498 */
499#define UPDATE_FIRST_FREE(map, new_first_free) \
500MACRO_BEGIN \
501 vm_map_t UFF_map; \
502 vm_map_entry_t UFF_first_free; \
503 vm_map_entry_t UFF_next_entry; \
504 UFF_map = (map); \
505 UFF_first_free = (new_first_free); \
506 UFF_next_entry = UFF_first_free->vme_next; \
507 while (trunc_page(UFF_next_entry->vme_start) == \
508 trunc_page(UFF_first_free->vme_end) || \
509 (trunc_page(UFF_next_entry->vme_start) == \
510 trunc_page(UFF_first_free->vme_start) && \
511 UFF_next_entry != vm_map_to_entry(UFF_map))) { \
512 UFF_first_free = UFF_next_entry; \
513 UFF_next_entry = UFF_first_free->vme_next; \
514 if (UFF_first_free == vm_map_to_entry(UFF_map)) \
515 break; \
516 } \
517 UFF_map->first_free = UFF_first_free; \
518 assert(first_free_is_valid(UFF_map)); \
519MACRO_END
520
521/*
522 * vm_map_entry_{un,}link:
523 *
524 * Insert/remove entries from maps (or map copies).
525 */
526#define vm_map_entry_link(map, after_where, entry) \
527MACRO_BEGIN \
528 vm_map_t VMEL_map; \
529 vm_map_entry_t VMEL_entry; \
530 VMEL_map = (map); \
531 VMEL_entry = (entry); \
532 _vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); \
533 UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free); \
534MACRO_END
535
536
537#define vm_map_copy_entry_link(copy, after_where, entry) \
538 _vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))
539
540#define _vm_map_entry_link(hdr, after_where, entry) \
541 MACRO_BEGIN \
542 (hdr)->nentries++; \
543 (entry)->vme_prev = (after_where); \
544 (entry)->vme_next = (after_where)->vme_next; \
545 (entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
546 MACRO_END
547
548#define vm_map_entry_unlink(map, entry) \
549MACRO_BEGIN \
550 vm_map_t VMEU_map; \
551 vm_map_entry_t VMEU_entry; \
552 vm_map_entry_t VMEU_first_free; \
553 VMEU_map = (map); \
554 VMEU_entry = (entry); \
555 if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) \
556 VMEU_first_free = VMEU_entry->vme_prev; \
557 else \
558 VMEU_first_free = VMEU_map->first_free; \
559 _vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry); \
560 UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free); \
561MACRO_END
562
563#define vm_map_copy_entry_unlink(copy, entry) \
564 _vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))
565
566#define _vm_map_entry_unlink(hdr, entry) \
567 MACRO_BEGIN \
568 (hdr)->nentries--; \
569 (entry)->vme_next->vme_prev = (entry)->vme_prev; \
570 (entry)->vme_prev->vme_next = (entry)->vme_next; \
571 MACRO_END
572
573/*
574 * kernel_vm_map_reference:
575 *
576 * kernel internal export version for iokit and bsd components
577 * in lieu of component interface semantics.
578 *
579 */
580void
581kernel_vm_map_reference(
582 register vm_map_t map)
583{
584 if (map == VM_MAP_NULL)
585 return;
586
587 mutex_lock(&map->s_lock);
588#if TASK_SWAPPER
589 assert(map->res_count > 0);
590 assert(map->ref_count >= map->res_count);
591 map->res_count++;
592#endif
593 map->ref_count++;
594 mutex_unlock(&map->s_lock);
595}
596
597#if MACH_ASSERT && TASK_SWAPPER
598/*
599 * vm_map_reference:
600 *
601 * Adds valid reference and residence counts to the given map.
602 * The map must be in memory (i.e. non-zero residence count).
603 *
604 */
605void
606vm_map_reference(
607 register vm_map_t map)
608{
609 if (map == VM_MAP_NULL)
610 return;
611
612 mutex_lock(&map->s_lock);
613 assert(map->res_count > 0);
614 assert(map->ref_count >= map->res_count);
615 map->ref_count++;
616 map->res_count++;
617 mutex_unlock(&map->s_lock);
618}
619
620/*
621 * vm_map_res_reference:
622 *
623 * Adds another valid residence count to the given map.
624 *
625 * Map is locked so this function can be called from
626 * vm_map_swapin.
627 *
628 */
629void vm_map_res_reference(register vm_map_t map)
630{
631 /* assert map is locked */
632 assert(map->res_count >= 0);
633 assert(map->ref_count >= map->res_count);
634 if (map->res_count == 0) {
635 mutex_unlock(&map->s_lock);
636 vm_map_lock(map);
637 vm_map_swapin(map);
638 mutex_lock(&map->s_lock);
639 ++map->res_count;
640 vm_map_unlock(map);
641 } else
642 ++map->res_count;
643}
644
645/*
646 * vm_map_reference_swap:
647 *
648 * Adds valid reference and residence counts to the given map.
649 *
650 * The map may not be in memory (i.e. zero residence count).
651 *
652 */
653void vm_map_reference_swap(register vm_map_t map)
654{
655 assert(map != VM_MAP_NULL);
656 mutex_lock(&map->s_lock);
657 assert(map->res_count >= 0);
658 assert(map->ref_count >= map->res_count);
659 map->ref_count++;
660 vm_map_res_reference(map);
661 mutex_unlock(&map->s_lock);
662}
663
664/*
665 * vm_map_res_deallocate:
666 *
667 * Decrement residence count on a map; possibly causing swapout.
668 *
669 * The map must be in memory (i.e. non-zero residence count).
670 *
671 * The map is locked, so this function is callable from vm_map_deallocate.
672 *
673 */
674void vm_map_res_deallocate(register vm_map_t map)
675{
676 assert(map->res_count > 0);
677 if (--map->res_count == 0) {
678 mutex_unlock(&map->s_lock);
679 vm_map_lock(map);
680 vm_map_swapout(map);
681 vm_map_unlock(map);
682 mutex_lock(&map->s_lock);
683 }
684 assert(map->ref_count >= map->res_count);
685}
686#endif /* MACH_ASSERT && TASK_SWAPPER */
687
688/*
689 * vm_map_deallocate:
690 *
691 * Removes a reference from the specified map,
692 * destroying it if no references remain.
693 * The map should not be locked.
694 */
695void
696vm_map_deallocate(
697 register vm_map_t map)
698{
699 unsigned int ref;
700
701 if (map == VM_MAP_NULL)
702 return;
703
704 mutex_lock(&map->s_lock);
705 ref = --map->ref_count;
706 if (ref > 0) {
707 vm_map_res_deallocate(map);
708 mutex_unlock(&map->s_lock);
709 return;
710 }
711 assert(map->ref_count == 0);
712 mutex_unlock(&map->s_lock);
713
714#if TASK_SWAPPER
715 /*
716 * The map residence count isn't decremented here because
717 * the vm_map_delete below will traverse the entire map,
718 * deleting entries, and the residence counts on objects
719 * and sharing maps will go away then.
720 */
721#endif
722
723 vm_map_destroy(map);
724}
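/*
 *	Usage sketch (illustrative only): references taken through
 *	kernel_vm_map_reference() or vm_map_reference() are paired with
 *	vm_map_deallocate(); the final release destroys the map:
 *
 *		kernel_vm_map_reference(map);	- +1 ref (and residence count)
 *		...				- use the map
 *		vm_map_deallocate(map);		- -1 ref; last one destroys
 */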
725
726/*
727 * vm_map_destroy:
728 *
729 * Actually destroy a map.
730 */
731void
732vm_map_destroy(
733 register vm_map_t map)
734{
735 vm_map_lock(map);
736 (void) vm_map_delete(map, map->min_offset,
737 map->max_offset, VM_MAP_NO_FLAGS);
738 vm_map_unlock(map);
739
740 pmap_destroy(map->pmap);
741
742 zfree(vm_map_zone, (vm_offset_t) map);
743}
744
745#if TASK_SWAPPER
746/*
747 * vm_map_swapin/vm_map_swapout
748 *
749 * Swap a map in and out, either referencing or releasing its resources.
750 * These functions are internal use only; however, they must be exported
751 * because they may be called from macros, which are exported.
752 *
753 * In the case of swapout, there could be races on the residence count,
754 * so if the residence count is up, we return, assuming that a
755 * vm_map_deallocate() call in the near future will bring us back.
756 *
757 * Locking:
758 * -- We use the map write lock for synchronization among races.
759 * -- The map write lock, and not the simple s_lock, protects the
760 * swap state of the map.
761 * -- If a map entry is a share map, then we hold both locks, in
762 * hierarchical order.
763 *
764 * Synchronization Notes:
 765 * 1) If a vm_map_swapin() call happens while a swapout is in progress, it
766 * will block on the map lock and proceed when swapout is through.
767 * 2) A vm_map_reference() call at this time is illegal, and will
768 * cause a panic. vm_map_reference() is only allowed on resident
769 * maps, since it refuses to block.
770 * 3) A vm_map_swapin() call during a swapin will block, and
 771 * proceed when the first swapin is done, turning into a nop.
772 * This is the reason the res_count is not incremented until
773 * after the swapin is complete.
774 * 4) There is a timing hole after the checks of the res_count, before
775 * the map lock is taken, during which a swapin may get the lock
776 * before a swapout about to happen. If this happens, the swapin
777 * will detect the state and increment the reference count, causing
778 * the swapout to be a nop, thereby delaying it until a later
779 * vm_map_deallocate. If the swapout gets the lock first, then
780 * the swapin will simply block until the swapout is done, and
781 * then proceed.
782 *
783 * Because vm_map_swapin() is potentially an expensive operation, it
784 * should be used with caution.
785 *
786 * Invariants:
787 * 1) A map with a residence count of zero is either swapped, or
788 * being swapped.
789 * 2) A map with a non-zero residence count is either resident,
790 * or being swapped in.
791 */
792
793int vm_map_swap_enable = 1;
794
795void vm_map_swapin (vm_map_t map)
796{
797 register vm_map_entry_t entry;
798
799 if (!vm_map_swap_enable) /* debug */
800 return;
801
802 /*
803 * Map is locked
804 * First deal with various races.
805 */
806 if (map->sw_state == MAP_SW_IN)
807 /*
808 * we raced with swapout and won. Returning will incr.
809 * the res_count, turning the swapout into a nop.
810 */
811 return;
812
813 /*
814 * The residence count must be zero. If we raced with another
815 * swapin, the state would have been IN; if we raced with a
816 * swapout (after another competing swapin), we must have lost
817 * the race to get here (see above comment), in which case
818 * res_count is still 0.
819 */
820 assert(map->res_count == 0);
821
822 /*
823 * There are no intermediate states of a map going out or
824 * coming in, since the map is locked during the transition.
825 */
826 assert(map->sw_state == MAP_SW_OUT);
827
828 /*
829 * We now operate upon each map entry. If the entry is a sub-
830 * or share-map, we call vm_map_res_reference upon it.
831 * If the entry is an object, we call vm_object_res_reference
832 * (this may iterate through the shadow chain).
833 * Note that we hold the map locked the entire time,
834 * even if we get back here via a recursive call in
835 * vm_map_res_reference.
836 */
837 entry = vm_map_first_entry(map);
838
839 while (entry != vm_map_to_entry(map)) {
840 if (entry->object.vm_object != VM_OBJECT_NULL) {
841 if (entry->is_sub_map) {
842 vm_map_t lmap = entry->object.sub_map;
843 mutex_lock(&lmap->s_lock);
844 vm_map_res_reference(lmap);
845 mutex_unlock(&lmap->s_lock);
846 } else {
847 vm_object_t object = entry->object.vm_object;
848 vm_object_lock(object);
849 /*
850 * This call may iterate through the
851 * shadow chain.
852 */
853 vm_object_res_reference(object);
854 vm_object_unlock(object);
855 }
856 }
857 entry = entry->vme_next;
858 }
859 assert(map->sw_state == MAP_SW_OUT);
860 map->sw_state = MAP_SW_IN;
861}
862
863void vm_map_swapout(vm_map_t map)
864{
865 register vm_map_entry_t entry;
866
867 /*
868 * Map is locked
869 * First deal with various races.
870 * If we raced with a swapin and lost, the residence count
871 * will have been incremented to 1, and we simply return.
872 */
873 mutex_lock(&map->s_lock);
874 if (map->res_count != 0) {
875 mutex_unlock(&map->s_lock);
876 return;
877 }
878 mutex_unlock(&map->s_lock);
879
880 /*
881 * There are no intermediate states of a map going out or
882 * coming in, since the map is locked during the transition.
883 */
884 assert(map->sw_state == MAP_SW_IN);
885
886 if (!vm_map_swap_enable)
887 return;
888
889 /*
890 * We now operate upon each map entry. If the entry is a sub-
891 * or share-map, we call vm_map_res_deallocate upon it.
892 * If the entry is an object, we call vm_object_res_deallocate
893 * (this may iterate through the shadow chain).
894 * Note that we hold the map locked the entire time,
895 * even if we get back here via a recursive call in
896 * vm_map_res_deallocate.
897 */
898 entry = vm_map_first_entry(map);
899
900 while (entry != vm_map_to_entry(map)) {
901 if (entry->object.vm_object != VM_OBJECT_NULL) {
902 if (entry->is_sub_map) {
903 vm_map_t lmap = entry->object.sub_map;
904 mutex_lock(&lmap->s_lock);
905 vm_map_res_deallocate(lmap);
906 mutex_unlock(&lmap->s_lock);
907 } else {
908 vm_object_t object = entry->object.vm_object;
909 vm_object_lock(object);
910 /*
911 * This call may take a long time,
912 * since it could actively push
913 * out pages (if we implement it
914 * that way).
915 */
916 vm_object_res_deallocate(object);
917 vm_object_unlock(object);
918 }
919 }
920 entry = entry->vme_next;
921 }
922 assert(map->sw_state == MAP_SW_IN);
923 map->sw_state = MAP_SW_OUT;
924}
925
926#endif /* TASK_SWAPPER */
927
928
929/*
930 * SAVE_HINT:
931 *
932 * Saves the specified entry as the hint for
933 * future lookups. Performs necessary interlocks.
934 */
935#define SAVE_HINT(map,value) \
936 mutex_lock(&(map)->s_lock); \
937 (map)->hint = (value); \
938 mutex_unlock(&(map)->s_lock);
939
940/*
941 * vm_map_lookup_entry: [ internal use only ]
942 *
943 * Finds the map entry containing (or
944 * immediately preceding) the specified address
945 * in the given map; the entry is returned
946 * in the "entry" parameter. The boolean
947 * result indicates whether the address is
948 * actually contained in the map.
949 */
950boolean_t
951vm_map_lookup_entry(
952 register vm_map_t map,
953 register vm_offset_t address,
954 vm_map_entry_t *entry) /* OUT */
955{
956 register vm_map_entry_t cur;
957 register vm_map_entry_t last;
958
959 /*
960 * Start looking either from the head of the
961 * list, or from the hint.
962 */
963
964 mutex_lock(&map->s_lock);
965 cur = map->hint;
966 mutex_unlock(&map->s_lock);
967
968 if (cur == vm_map_to_entry(map))
969 cur = cur->vme_next;
970
971 if (address >= cur->vme_start) {
972 /*
973 * Go from hint to end of list.
974 *
975 * But first, make a quick check to see if
976 * we are already looking at the entry we
977 * want (which is usually the case).
978 * Note also that we don't need to save the hint
979 * here... it is the same hint (unless we are
980 * at the header, in which case the hint didn't
981 * buy us anything anyway).
982 */
983 last = vm_map_to_entry(map);
984 if ((cur != last) && (cur->vme_end > address)) {
985 *entry = cur;
986 return(TRUE);
987 }
988 }
989 else {
990 /*
991 * Go from start to hint, *inclusively*
992 */
993 last = cur->vme_next;
994 cur = vm_map_first_entry(map);
995 }
996
997 /*
998 * Search linearly
999 */
1000
1001 while (cur != last) {
1002 if (cur->vme_end > address) {
1003 if (address >= cur->vme_start) {
1004 /*
1005 * Save this lookup for future
1006 * hints, and return
1007 */
1008
1009 *entry = cur;
1010 SAVE_HINT(map, cur);
1011 return(TRUE);
1012 }
1013 break;
1014 }
1015 cur = cur->vme_next;
1016 }
1017 *entry = cur->vme_prev;
1018 SAVE_HINT(map, *entry);
1019 return(FALSE);
1020}
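#if 0
/*
 * Illustrative sketch (not from the original source): how callers use the
 * boolean result and the OUT entry.  The map is assumed to be locked by the
 * caller, as the internal callers below do.
 */
static boolean_t
example_lookup(
	vm_map_t	map,
	vm_offset_t	addr)
{
	vm_map_entry_t	entry;

	if (vm_map_lookup_entry(map, addr, &entry)) {
		/* addr lies within [entry->vme_start, entry->vme_end) */
		return TRUE;
	}
	/* entry precedes addr; a new mapping would be linked after it */
	return FALSE;
}
#endif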
1021
1022/*
1023 * Routine: vm_map_find_space
1024 * Purpose:
1025 * Allocate a range in the specified virtual address map,
1026 * returning the entry allocated for that range.
1027 * Used by kmem_alloc, etc.
1028 *
 1029 * The map must NOT be locked. It will be returned locked
1030 * on KERN_SUCCESS, unlocked on failure.
1031 *
1032 * If an entry is allocated, the object/offset fields
1033 * are initialized to zero.
1034 */
1035kern_return_t
1036vm_map_find_space(
1037 register vm_map_t map,
1038 vm_offset_t *address, /* OUT */
1039 vm_size_t size,
1040 vm_offset_t mask,
1041 vm_map_entry_t *o_entry) /* OUT */
1042{
1043 register vm_map_entry_t entry, new_entry;
1044 register vm_offset_t start;
1045 register vm_offset_t end;
1046
1047 new_entry = vm_map_entry_create(map);
1048
1049 /*
1050 * Look for the first possible address; if there's already
1051 * something at this address, we have to start after it.
1052 */
1053
1054 vm_map_lock(map);
1055
1056 assert(first_free_is_valid(map));
1057 if ((entry = map->first_free) == vm_map_to_entry(map))
1058 start = map->min_offset;
1059 else
1060 start = entry->vme_end;
1061
1062 /*
1063 * In any case, the "entry" always precedes
1064 * the proposed new region throughout the loop:
1065 */
1066
1067 while (TRUE) {
1068 register vm_map_entry_t next;
1069
1070 /*
1071 * Find the end of the proposed new region.
1072 * Be sure we didn't go beyond the end, or
1073 * wrap around the address.
1074 */
1075
1076 end = ((start + mask) & ~mask);
1077 if (end < start) {
1078 vm_map_entry_dispose(map, new_entry);
1079 vm_map_unlock(map);
1080 return(KERN_NO_SPACE);
1081 }
1082 start = end;
1083 end += size;
1084
1085 if ((end > map->max_offset) || (end < start)) {
1086 vm_map_entry_dispose(map, new_entry);
1087 vm_map_unlock(map);
1088 return(KERN_NO_SPACE);
1089 }
1090
1091 /*
1092 * If there are no more entries, we must win.
1093 */
1094
1095 next = entry->vme_next;
1096 if (next == vm_map_to_entry(map))
1097 break;
1098
1099 /*
1100 * If there is another entry, it must be
1101 * after the end of the potential new region.
1102 */
1103
1104 if (next->vme_start >= end)
1105 break;
1106
1107 /*
1108 * Didn't fit -- move to the next entry.
1109 */
1110
1111 entry = next;
1112 start = entry->vme_end;
1113 }
1114
1115 /*
1116 * At this point,
1117 * "start" and "end" should define the endpoints of the
1118 * available new range, and
1119 * "entry" should refer to the region before the new
1120 * range, and
1121 *
1122 * the map should be locked.
1123 */
1124
1125 *address = start;
1126
1127 new_entry->vme_start = start;
1128 new_entry->vme_end = end;
1129 assert(page_aligned(new_entry->vme_start));
1130 assert(page_aligned(new_entry->vme_end));
1131
1132 new_entry->is_shared = FALSE;
1133 new_entry->is_sub_map = FALSE;
1134 new_entry->use_pmap = FALSE;
1135 new_entry->object.vm_object = VM_OBJECT_NULL;
1136 new_entry->offset = (vm_object_offset_t) 0;
1137
1138 new_entry->needs_copy = FALSE;
1139
1140 new_entry->inheritance = VM_INHERIT_DEFAULT;
1141 new_entry->protection = VM_PROT_DEFAULT;
1142 new_entry->max_protection = VM_PROT_ALL;
1143 new_entry->behavior = VM_BEHAVIOR_DEFAULT;
1144 new_entry->wired_count = 0;
1145 new_entry->user_wired_count = 0;
1146
1147 new_entry->in_transition = FALSE;
1148 new_entry->needs_wakeup = FALSE;
1149
1150 /*
1151 * Insert the new entry into the list
1152 */
1153
1154 vm_map_entry_link(map, entry, new_entry);
1155
1156 map->size += size;
1157
1158 /*
1159 * Update the lookup hint
1160 */
1161 SAVE_HINT(map, new_entry);
1162
1163 *o_entry = new_entry;
1164 return(KERN_SUCCESS);
1165}
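#if 0
/*
 * Illustrative sketch (not from the original source): the calling pattern
 * used by kmem_alloc-style code.  On KERN_SUCCESS the map comes back locked
 * and the entry's object/offset fields are zeroed for the caller to fill in.
 */
static kern_return_t
example_find_space(
	vm_map_t	map,
	vm_size_t	size,
	vm_object_t	object,
	vm_offset_t	*addrp)
{
	vm_map_entry_t	entry;
	kern_return_t	kr;

	kr = vm_map_find_space(map, addrp, size, (vm_offset_t) 0, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	entry->object.vm_object = object;
	entry->offset = (vm_object_offset_t) 0;
	vm_map_unlock(map);
	return KERN_SUCCESS;
}
#endif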
1166
1167int vm_map_pmap_enter_print = FALSE;
1168int vm_map_pmap_enter_enable = FALSE;
1169
1170/*
1171 * Routine: vm_map_pmap_enter
1172 *
1173 * Description:
1174 * Force pages from the specified object to be entered into
1175 * the pmap at the specified address if they are present.
 1176 * As soon as a page is not found in the object, the scan ends.
1177 *
1178 * Returns:
1179 * Nothing.
1180 *
1181 * In/out conditions:
1182 * The source map should not be locked on entry.
1183 */
1184void
1185vm_map_pmap_enter(
1186 vm_map_t map,
1187 register vm_offset_t addr,
1188 register vm_offset_t end_addr,
1189 register vm_object_t object,
1190 vm_object_offset_t offset,
1191 vm_prot_t protection)
1192{
 1193
1194 while (addr < end_addr) {
1195 register vm_page_t m;
1196
1197 vm_object_lock(object);
1198 vm_object_paging_begin(object);
1199
1200 m = vm_page_lookup(object, offset);
1201 if (m == VM_PAGE_NULL || m->busy ||
1202 (m->unusual && ( m->error || m->restart || m->absent ||
1203 protection & m->page_lock))) {
1204
1205 vm_object_paging_end(object);
1206 vm_object_unlock(object);
1207 return;
1208 }
1209
1210 assert(!m->fictitious); /* XXX is this possible ??? */
1211
1212 if (vm_map_pmap_enter_print) {
1213 printf("vm_map_pmap_enter:");
1214 printf("map: %x, addr: %x, object: %x, offset: %x\n",
1215 map, addr, object, offset);
1216 }
 1217 m->busy = TRUE;
1218
1219 if (m->no_isync == TRUE) {
1220 pmap_sync_caches_phys(m->phys_addr);
1221
1222 m->no_isync = FALSE;
1223 }
1224 vm_object_unlock(object);
1225
1226 PMAP_ENTER(map->pmap, addr, m,
1227 protection, FALSE);
1228
1229 vm_object_lock(object);
 1230
1231 PAGE_WAKEUP_DONE(m);
1232 vm_page_lock_queues();
1233 if (!m->active && !m->inactive)
1234 vm_page_activate(m);
1235 vm_page_unlock_queues();
1236 vm_object_paging_end(object);
1237 vm_object_unlock(object);
1238
1239 offset += PAGE_SIZE_64;
1240 addr += PAGE_SIZE;
1241 }
1242}
1243
1244/*
1245 * Routine: vm_map_enter
1246 *
1247 * Description:
1248 * Allocate a range in the specified virtual address map.
1249 * The resulting range will refer to memory defined by
1250 * the given memory object and offset into that object.
1251 *
1252 * Arguments are as defined in the vm_map call.
1253 */
1254kern_return_t
1255vm_map_enter(
1256 register vm_map_t map,
1257 vm_offset_t *address, /* IN/OUT */
1258 vm_size_t size,
1259 vm_offset_t mask,
1260 int flags,
1261 vm_object_t object,
1262 vm_object_offset_t offset,
1263 boolean_t needs_copy,
1264 vm_prot_t cur_protection,
1265 vm_prot_t max_protection,
1266 vm_inherit_t inheritance)
1267{
1268 vm_map_entry_t entry;
1269 register vm_offset_t start;
1270 register vm_offset_t end;
1271 kern_return_t result = KERN_SUCCESS;
1272
1273 boolean_t anywhere = VM_FLAGS_ANYWHERE & flags;
1274 char alias;
1275
1276 VM_GET_FLAGS_ALIAS(flags, alias);
1277
1278#define RETURN(value) { result = value; goto BailOut; }
1279
1280 assert(page_aligned(*address));
1281 assert(page_aligned(size));
1282 StartAgain: ;
1283
1284 start = *address;
1285
1286 if (anywhere) {
1287 vm_map_lock(map);
1288
1289 /*
1290 * Calculate the first possible address.
1291 */
1292
1293 if (start < map->min_offset)
1294 start = map->min_offset;
1295 if (start > map->max_offset)
1296 RETURN(KERN_NO_SPACE);
1297
1298 /*
1299 * Look for the first possible address;
1300 * if there's already something at this
1301 * address, we have to start after it.
1302 */
1303
1304 assert(first_free_is_valid(map));
1305 if (start == map->min_offset) {
1306 if ((entry = map->first_free) != vm_map_to_entry(map))
1307 start = entry->vme_end;
1308 } else {
1309 vm_map_entry_t tmp_entry;
1310 if (vm_map_lookup_entry(map, start, &tmp_entry))
1311 start = tmp_entry->vme_end;
1312 entry = tmp_entry;
1313 }
1314
1315 /*
1316 * In any case, the "entry" always precedes
1317 * the proposed new region throughout the
1318 * loop:
1319 */
1320
1321 while (TRUE) {
1322 register vm_map_entry_t next;
1323
1324 /*
1325 * Find the end of the proposed new region.
1326 * Be sure we didn't go beyond the end, or
1327 * wrap around the address.
1328 */
1329
1330 end = ((start + mask) & ~mask);
1331 if (end < start)
1332 RETURN(KERN_NO_SPACE);
1333 start = end;
1334 end += size;
1335
1336 if ((end > map->max_offset) || (end < start)) {
1337 if (map->wait_for_space) {
1338 if (size <= (map->max_offset -
1339 map->min_offset)) {
1340 assert_wait((event_t)map,
1341 THREAD_ABORTSAFE);
1342 vm_map_unlock(map);
1343 thread_block((void (*)(void))0);
1344 goto StartAgain;
1345 }
1346 }
1347 RETURN(KERN_NO_SPACE);
1348 }
1349
1350 /*
1351 * If there are no more entries, we must win.
1352 */
1353
1354 next = entry->vme_next;
1355 if (next == vm_map_to_entry(map))
1356 break;
1357
1358 /*
1359 * If there is another entry, it must be
1360 * after the end of the potential new region.
1361 */
1362
1363 if (next->vme_start >= end)
1364 break;
1365
1366 /*
1367 * Didn't fit -- move to the next entry.
1368 */
1369
1370 entry = next;
1371 start = entry->vme_end;
1372 }
1373 *address = start;
1374 } else {
1375 vm_map_entry_t temp_entry;
1376
1377 /*
1378 * Verify that:
1379 * the address doesn't itself violate
1380 * the mask requirement.
1381 */
1382
1383 vm_map_lock(map);
1384 if ((start & mask) != 0)
1385 RETURN(KERN_NO_SPACE);
1386
1387 /*
1388 * ... the address is within bounds
1389 */
1390
1391 end = start + size;
1392
1393 if ((start < map->min_offset) ||
1394 (end > map->max_offset) ||
1395 (start >= end)) {
1396 RETURN(KERN_INVALID_ADDRESS);
1397 }
1398
1399 /*
1400 * ... the starting address isn't allocated
1401 */
1402
1403 if (vm_map_lookup_entry(map, start, &temp_entry))
1404 RETURN(KERN_NO_SPACE);
1405
1406 entry = temp_entry;
1407
1408 /*
1409 * ... the next region doesn't overlap the
1410 * end point.
1411 */
1412
1413 if ((entry->vme_next != vm_map_to_entry(map)) &&
1414 (entry->vme_next->vme_start < end))
1415 RETURN(KERN_NO_SPACE);
1416 }
1417
1418 /*
1419 * At this point,
1420 * "start" and "end" should define the endpoints of the
1421 * available new range, and
1422 * "entry" should refer to the region before the new
1423 * range, and
1424 *
1425 * the map should be locked.
1426 */
1427
1428 /*
1429 * See whether we can avoid creating a new entry (and object) by
1430 * extending one of our neighbors. [So far, we only attempt to
1431 * extend from below.]
1432 */
1433
1434 if ((object == VM_OBJECT_NULL) &&
1435 (entry != vm_map_to_entry(map)) &&
1436 (entry->vme_end == start) &&
1437 (!entry->is_shared) &&
1438 (!entry->is_sub_map) &&
1439 (entry->alias == alias) &&
1440 (entry->inheritance == inheritance) &&
1441 (entry->protection == cur_protection) &&
1442 (entry->max_protection == max_protection) &&
1443 (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
1444 (entry->in_transition == 0) &&
 1445 ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
1446 (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
1447 if (vm_object_coalesce(entry->object.vm_object,
1448 VM_OBJECT_NULL,
1449 entry->offset,
1450 (vm_object_offset_t) 0,
1451 (vm_size_t)(entry->vme_end - entry->vme_start),
1452 (vm_size_t)(end - entry->vme_end))) {
1453
1454 /*
1455 * Coalesced the two objects - can extend
1456 * the previous map entry to include the
1457 * new range.
1458 */
1459 map->size += (end - entry->vme_end);
1460 entry->vme_end = end;
1461 UPDATE_FIRST_FREE(map, map->first_free);
1462 RETURN(KERN_SUCCESS);
1463 }
1464 }
1465
1466 /*
1467 * Create a new entry
1468 */
1469
1470 { /**/
1471 register vm_map_entry_t new_entry;
1472
1473 new_entry = vm_map_entry_insert(map, entry, start, end, object,
1474 offset, needs_copy, FALSE, FALSE,
1475 cur_protection, max_protection,
1476 VM_BEHAVIOR_DEFAULT, inheritance, 0);
1477 new_entry->alias = alias;
1478 vm_map_unlock(map);
1479
1480 /* Wire down the new entry if the user
1481 * requested all new map entries be wired.
1482 */
1483 if (map->wiring_required) {
1484 result = vm_map_wire(map, start, end,
1485 new_entry->protection, TRUE);
1486 return(result);
1487 }
1488
1489 if ((object != VM_OBJECT_NULL) &&
1490 (vm_map_pmap_enter_enable) &&
1491 (!anywhere) &&
1492 (!needs_copy) &&
1493 (size < (128*1024))) {
1494 vm_map_pmap_enter(map, start, end,
1495 object, offset, cur_protection);
1496 }
1497
1498 return(result);
1499 } /**/
1500
1501 BailOut: ;
1502 vm_map_unlock(map);
1503 return(result);
1504
1505#undef RETURN
1506}
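#if 0
/*
 * Illustrative sketch (not from the original source): an "anywhere"
 * anonymous mapping, roughly what a vm_allocate() request reduces to.  The
 * protection and inheritance values are the usual defaults, not requirements.
 */
static kern_return_t
example_enter_anywhere(
	vm_map_t	map,
	vm_size_t	size,
	vm_offset_t	*addrp)
{
	*addrp = 0;
	return vm_map_enter(map, addrp, round_page(size), (vm_offset_t) 0,
			    VM_FLAGS_ANYWHERE,
			    VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
			    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
#endif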
1507
1508/*
1509 * vm_map_clip_start: [ internal use only ]
1510 *
1511 * Asserts that the given entry begins at or after
1512 * the specified address; if necessary,
1513 * it splits the entry into two.
1514 */
1515#ifndef i386
1516#define vm_map_clip_start(map, entry, startaddr) \
1517MACRO_BEGIN \
1518 vm_map_t VMCS_map; \
1519 vm_map_entry_t VMCS_entry; \
1520 vm_offset_t VMCS_startaddr; \
1521 VMCS_map = (map); \
1522 VMCS_entry = (entry); \
1523 VMCS_startaddr = (startaddr); \
1524 if (VMCS_startaddr > VMCS_entry->vme_start) { \
1525 if(entry->use_pmap) { \
1526 vm_offset_t pmap_base_addr; \
1527 vm_offset_t pmap_end_addr; \
1528 \
1529 pmap_base_addr = 0xF0000000 & entry->vme_start; \
1530 pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
1531 pmap_unnest(map->pmap, pmap_base_addr, \
1532 (pmap_end_addr - pmap_base_addr) + 1); \
1533 entry->use_pmap = FALSE; \
1534 } \
1535 _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
1536 } \
1537 UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
1538MACRO_END
1539#else
1540#define vm_map_clip_start(map, entry, startaddr) \
1541MACRO_BEGIN \
1542 vm_map_t VMCS_map; \
1543 vm_map_entry_t VMCS_entry; \
1544 vm_offset_t VMCS_startaddr; \
1545 VMCS_map = (map); \
1546 VMCS_entry = (entry); \
1547 VMCS_startaddr = (startaddr); \
1548 if (VMCS_startaddr > VMCS_entry->vme_start) { \
1549 _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
1550 } \
1551 UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \
1552MACRO_END
1553#endif
1554
1555#define vm_map_copy_clip_start(copy, entry, startaddr) \
1556 MACRO_BEGIN \
1557 if ((startaddr) > (entry)->vme_start) \
1558 _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
1559 MACRO_END
1560
1561/*
1562 * This routine is called only when it is known that
1563 * the entry must be split.
1564 */
1565void
1566_vm_map_clip_start(
1567 register struct vm_map_header *map_header,
1568 register vm_map_entry_t entry,
1569 register vm_offset_t start)
1570{
1571 register vm_map_entry_t new_entry;
1572
1573 /*
1574 * Split off the front portion --
1575 * note that we must insert the new
1576 * entry BEFORE this one, so that
1577 * this entry has the specified starting
1578 * address.
1579 */
1580
1581 new_entry = _vm_map_entry_create(map_header);
1582 vm_map_entry_copy_full(new_entry, entry);
1583
1584 new_entry->vme_end = start;
1585 entry->offset += (start - entry->vme_start);
1586 entry->vme_start = start;
1587
1588 _vm_map_entry_link(map_header, entry->vme_prev, new_entry);
1589
1590 if (entry->is_sub_map)
1591 vm_map_reference(new_entry->object.sub_map);
1592 else
1593 vm_object_reference(new_entry->object.vm_object);
1594}
1595
1596
1597/*
1598 * vm_map_clip_end: [ internal use only ]
1599 *
1600 * Asserts that the given entry ends at or before
1601 * the specified address; if necessary,
1602 * it splits the entry into two.
1603 */
1604#ifndef i386
1605#define vm_map_clip_end(map, entry, endaddr) \
1606MACRO_BEGIN \
1607 vm_map_t VMCE_map; \
1608 vm_map_entry_t VMCE_entry; \
1609 vm_offset_t VMCE_endaddr; \
1610 VMCE_map = (map); \
1611 VMCE_entry = (entry); \
1612 VMCE_endaddr = (endaddr); \
1613 if (VMCE_endaddr < VMCE_entry->vme_end) { \
1614 if(entry->use_pmap) { \
1615 vm_offset_t pmap_base_addr; \
1616 vm_offset_t pmap_end_addr; \
1617 \
1618 pmap_base_addr = 0xF0000000 & entry->vme_start; \
1619 pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
1620 pmap_unnest(map->pmap, pmap_base_addr, \
1621 (pmap_end_addr - pmap_base_addr) + 1); \
1622 entry->use_pmap = FALSE; \
1623 } \
1624 _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
1625 } \
1626 UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
1627MACRO_END
1628#else
1629#define vm_map_clip_end(map, entry, endaddr) \
1630MACRO_BEGIN \
1631 vm_map_t VMCE_map; \
1632 vm_map_entry_t VMCE_entry; \
1633 vm_offset_t VMCE_endaddr; \
1634 VMCE_map = (map); \
1635 VMCE_entry = (entry); \
1636 VMCE_endaddr = (endaddr); \
1637 if (VMCE_endaddr < VMCE_entry->vme_end) { \
1638 _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
1639 } \
1640 UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \
1641MACRO_END
1642#endif
1643
1644#define vm_map_copy_clip_end(copy, entry, endaddr) \
1645 MACRO_BEGIN \
1646 if ((endaddr) < (entry)->vme_end) \
1647 _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
1648 MACRO_END
1649
1650/*
1651 * This routine is called only when it is known that
1652 * the entry must be split.
1653 */
1654void
1655_vm_map_clip_end(
1656 register struct vm_map_header *map_header,
1657 register vm_map_entry_t entry,
1658 register vm_offset_t end)
1659{
1660 register vm_map_entry_t new_entry;
1661
1662 /*
1663 * Create a new entry and insert it
1664 * AFTER the specified entry
1665 */
1666
1667 new_entry = _vm_map_entry_create(map_header);
1668 vm_map_entry_copy_full(new_entry, entry);
1669
1670 new_entry->vme_start = entry->vme_end = end;
1671 new_entry->offset += (end - entry->vme_start);
1672
1673 _vm_map_entry_link(map_header, entry, new_entry);
1674
1675 if (entry->is_sub_map)
1676 vm_map_reference(new_entry->object.sub_map);
1677 else
1678 vm_object_reference(new_entry->object.vm_object);
1679}
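/*
 *	Illustrative pattern (not from the original source): callers isolate
 *	an exact [start, end) range by clipping at both edges before modifying
 *	it, as vm_map_submap() and vm_map_protect() below do:
 *
 *		if (vm_map_lookup_entry(map, start, &entry))
 *			vm_map_clip_start(map, entry, start);
 *		vm_map_clip_end(map, entry, end);
 *		- "entry" now covers addresses within [start, end) only
 */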
1680
1681
1682/*
1683 * VM_MAP_RANGE_CHECK: [ internal use only ]
1684 *
1685 * Asserts that the starting and ending region
1686 * addresses fall within the valid range of the map.
1687 */
1688#define VM_MAP_RANGE_CHECK(map, start, end) \
1689 { \
1690 if (start < vm_map_min(map)) \
1691 start = vm_map_min(map); \
1692 if (end > vm_map_max(map)) \
1693 end = vm_map_max(map); \
1694 if (start > end) \
1695 start = end; \
1696 }
1697
1698/*
1699 * vm_map_range_check: [ internal use only ]
1700 *
 1701 * Check that the region defined by the specified start and
 1702 * end addresses is wholly contained within a single map
 1703 * entry or set of adjacent map entries of the specified map,
1704 * i.e. the specified region contains no unmapped space.
1705 * If any or all of the region is unmapped, FALSE is returned.
1706 * Otherwise, TRUE is returned and if the output argument 'entry'
1707 * is not NULL it points to the map entry containing the start
1708 * of the region.
1709 *
1710 * The map is locked for reading on entry and is left locked.
1711 */
1712boolean_t
1713vm_map_range_check(
1714 register vm_map_t map,
1715 register vm_offset_t start,
1716 register vm_offset_t end,
1717 vm_map_entry_t *entry)
1718{
1719 vm_map_entry_t cur;
1720 register vm_offset_t prev;
1721
1722 /*
1723 * Basic sanity checks first
1724 */
1725 if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
1726 return (FALSE);
1727
1728 /*
1729 * Check first if the region starts within a valid
1730 * mapping for the map.
1731 */
1732 if (!vm_map_lookup_entry(map, start, &cur))
1733 return (FALSE);
1734
1735 /*
1736 * Optimize for the case that the region is contained
1737 * in a single map entry.
1738 */
1739 if (entry != (vm_map_entry_t *) NULL)
1740 *entry = cur;
1741 if (end <= cur->vme_end)
1742 return (TRUE);
1743
1744 /*
1745 * If the region is not wholly contained within a
1746 * single entry, walk the entries looking for holes.
1747 */
1748 prev = cur->vme_end;
1749 cur = cur->vme_next;
1750 while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) {
1751 if (end <= cur->vme_end)
1752 return (TRUE);
1753 prev = cur->vme_end;
1754 cur = cur->vme_next;
1755 }
1756 return (FALSE);
1757}
1758
1759/*
1760 * vm_map_submap: [ kernel use only ]
1761 *
1762 * Mark the given range as handled by a subordinate map.
1763 *
1764 * This range must have been created with vm_map_find using
1765 * the vm_submap_object, and no other operations may have been
1766 * performed on this range prior to calling vm_map_submap.
1767 *
1768 * Only a limited number of operations can be performed
 1769 * within this range after calling vm_map_submap:
1770 * vm_fault
1771 * [Don't try vm_map_copyin!]
1772 *
1773 * To remove a submapping, one must first remove the
1774 * range from the superior map, and then destroy the
1775 * submap (if desired). [Better yet, don't try it.]
1776 */
1777kern_return_t
1778vm_map_submap(
1779 register vm_map_t map,
1780 register vm_offset_t start,
1781 register vm_offset_t end,
1782 vm_map_t submap,
1783 vm_offset_t offset,
1784 boolean_t use_pmap)
1785{
1786 vm_map_entry_t entry;
1787 register kern_return_t result = KERN_INVALID_ARGUMENT;
1788 register vm_object_t object;
1789
1790 vm_map_lock(map);
1791
1792 VM_MAP_RANGE_CHECK(map, start, end);
1793
1794 if (vm_map_lookup_entry(map, start, &entry)) {
1795 vm_map_clip_start(map, entry, start);
1796 }
1797 else
1798 entry = entry->vme_next;
1799
1800 if(entry == vm_map_to_entry(map)) {
1801 vm_map_unlock(map);
1802 return KERN_INVALID_ARGUMENT;
1803 }
1804
1805 vm_map_clip_end(map, entry, end);
1806
1807 if ((entry->vme_start == start) && (entry->vme_end == end) &&
1808 (!entry->is_sub_map) &&
1809 ((object = entry->object.vm_object) == vm_submap_object) &&
1810 (object->resident_page_count == 0) &&
1811 (object->copy == VM_OBJECT_NULL) &&
1812 (object->shadow == VM_OBJECT_NULL) &&
1813 (!object->pager_created)) {
1814 entry->offset = (vm_object_offset_t)offset;
1815 entry->object.vm_object = VM_OBJECT_NULL;
1816 vm_object_deallocate(object);
1817 entry->is_sub_map = TRUE;
1818 vm_map_reference(entry->object.sub_map = submap);
1819#ifndef i386
1820 if ((use_pmap) && (offset == 0)) {
1821 /* nest if platform code will allow */
1822 result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap,
1823 start, end - start);
1824 if(result)
1825 panic("pmap_nest failed!");
1826 entry->use_pmap = TRUE;
1827 }
1828#endif
1829#ifdef i386
1830 pmap_remove(map->pmap, start, end);
1831#endif
1832 result = KERN_SUCCESS;
1833 }
1834 vm_map_unlock(map);
1835
1836 return(result);
1837}
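/*
 *	Usage sketch (illustrative only; the reservation step and its argument
 *	values are assumptions): the range is first reserved with the
 *	placeholder vm_submap_object and only then converted, e.g.
 *
 *		vm_object_reference(vm_submap_object);
 *		kr = vm_map_enter(map, &start, size, 0, VM_FLAGS_FIXED,
 *			vm_submap_object, 0, FALSE,
 *			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 *		if (kr == KERN_SUCCESS)
 *			kr = vm_map_submap(map, start, start + size,
 *				submap, 0, FALSE);
 */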
1838
1839/*
1840 * vm_map_protect:
1841 *
1842 * Sets the protection of the specified address
1843 * region in the target map. If "set_max" is
1844 * specified, the maximum protection is to be set;
1845 * otherwise, only the current protection is affected.
1846 */
1847kern_return_t
1848vm_map_protect(
1849 register vm_map_t map,
1850 register vm_offset_t start,
1851 register vm_offset_t end,
1852 register vm_prot_t new_prot,
1853 register boolean_t set_max)
1854{
1855 register vm_map_entry_t current;
1856 register vm_offset_t prev;
1857 vm_map_entry_t entry;
1858 vm_prot_t new_max;
1859 boolean_t clip;
1860
1861 XPR(XPR_VM_MAP,
1862 "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d",
1863 (integer_t)map, start, end, new_prot, set_max);
1864
1865 vm_map_lock(map);
1866
1867 /*
1868 * Lookup the entry. If it doesn't start in a valid
1869 * entry, return an error. Remember if we need to
1870 * clip the entry. We don't do it here because we don't
1871 * want to make any changes until we've scanned the
1872 * entire range below for address and protection
1873 * violations.
1874 */
1875 if (!(clip = vm_map_lookup_entry(map, start, &entry))) {
1876 vm_map_unlock(map);
1877 return(KERN_INVALID_ADDRESS);
1878 }
1879
1880 /*
1881 * Make a first pass to check for protection and address
1882 * violations.
1883 */
1884
1885 current = entry;
1886 prev = current->vme_start;
1887 while ((current != vm_map_to_entry(map)) &&
1888 (current->vme_start < end)) {
1889
1890 /*
1891 * If there is a hole, return an error.
1892 */
1893 if (current->vme_start != prev) {
1894 vm_map_unlock(map);
1895 return(KERN_INVALID_ADDRESS);
1896 }
1897
1898 new_max = current->max_protection;
1899 if(new_prot & VM_PROT_COPY) {
1900 new_max |= VM_PROT_WRITE;
1901 if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) {
1902 vm_map_unlock(map);
1903 return(KERN_PROTECTION_FAILURE);
1904 }
1905 } else {
1906 if ((new_prot & new_max) != new_prot) {
1907 vm_map_unlock(map);
1908 return(KERN_PROTECTION_FAILURE);
1909 }
1910 }
1911
1912 prev = current->vme_end;
1913 current = current->vme_next;
1914 }
1915 if (end > prev) {
1916 vm_map_unlock(map);
1917 return(KERN_INVALID_ADDRESS);
1918 }
1919
1920 /*
1921 * Go back and fix up protections.
1922 * Clip to start here if the range starts within
1923 * the entry.
1924 */
1925
1926 current = entry;
1927 if (clip) {
1928 vm_map_clip_start(map, entry, start);
1929 }
1930 while ((current != vm_map_to_entry(map)) &&
1931 (current->vme_start < end)) {
1932
1933 vm_prot_t old_prot;
1934
1935 vm_map_clip_end(map, current, end);
1936
1937 old_prot = current->protection;
1938
1939 if(new_prot & VM_PROT_COPY) {
1940 /* caller is asking specifically to copy the */
1941 /* mapped data, this implies that max protection */
1942 /* will include write. Caller must be prepared */
1943 /* for loss of shared memory communication in the */
1944 /* target area after taking this step */
1945 current->needs_copy = TRUE;
1946 current->max_protection |= VM_PROT_WRITE;
1947 }
1948
1949 if (set_max)
1950 current->protection =
1951 (current->max_protection =
1952 new_prot & ~VM_PROT_COPY) &
1953 old_prot;
1954 else
1955 current->protection = new_prot & ~VM_PROT_COPY;
1956
1957 /*
1958 * Update physical map if necessary.
1959 * If the request is to turn off write protection,
1960 * we won't do it for real (in pmap). This is because
1961 * it would cause copy-on-write to fail. We've already
 1962 * set the new protection in the map, so if a
1963 * write-protect fault occurred, it will be fixed up
1964 * properly, COW or not.
1965 */
1966 /* the 256M hack for existing hardware limitations */
1967 if (current->protection != old_prot) {
1968 if(current->is_sub_map && current->use_pmap) {
1969 vm_offset_t pmap_base_addr;
1970 vm_offset_t pmap_end_addr;
1971 vm_map_entry_t local_entry;
1972
1973 pmap_base_addr = 0xF0000000 & current->vme_start;
1974 pmap_end_addr = (pmap_base_addr + 0x10000000) - 1;
1975#ifndef i386
1976 if(!vm_map_lookup_entry(map,
1977 pmap_base_addr, &local_entry))
1978 panic("vm_map_protect: nested pmap area is missing");
1979 while ((local_entry != vm_map_to_entry(map)) &&
1980 (local_entry->vme_start < pmap_end_addr)) {
1981 local_entry->use_pmap = FALSE;
1982 local_entry = local_entry->vme_next;
1983 }
1984 pmap_unnest(map->pmap, pmap_base_addr,
1985 (pmap_end_addr - pmap_base_addr) + 1);
1986#endif
1987 }
1988 if (!(current->protection & VM_PROT_WRITE)) {
 1989 /* Look one level in: we support nested pmaps */
1990 /* from mapped submaps which are direct entries */
1991 /* in our map */
1992 if(current->is_sub_map && current->use_pmap) {
1993 pmap_protect(current->object.sub_map->pmap,
1994 current->vme_start,
1995 current->vme_end,
1996 current->protection);
1997 } else {
1998 pmap_protect(map->pmap, current->vme_start,
1999 current->vme_end,
2000 current->protection);
2001 }
2002 }
2003 }
2004 current = current->vme_next;
2005 }
2006
2007 vm_map_unlock(map);
2008 return(KERN_SUCCESS);
2009}
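
/*
 * Illustrative sketch only (not part of this file): the validate-then-
 * commit shape used by vm_map_protect above.  A first pass walks the
 * range checking for holes and protection violations without modifying
 * anything; only if the whole range passes is a second pass made to
 * apply the change.  The prot_range type and its fields below are
 * hypothetical stand-ins for vm_map_entry_t.
 */
struct prot_range {
	unsigned long		start;
	unsigned long		end;
	unsigned int		max_prot;
	unsigned int		prot;
	struct prot_range	*next;
};

static int
prot_ranges_allow(
	struct prot_range	*head,
	unsigned long		start,
	unsigned long		end,
	unsigned int		new_prot)
{
	unsigned long		prev = start;
	struct prot_range	*r;

	for (r = head; r != NULL && r->start < end; r = r->next) {
		if (r->start != prev)
			return 0;	/* hole in the region */
		if ((new_prot & r->max_prot) != new_prot)
			return 0;	/* exceeds max_protection */
		prev = r->end;
	}
	return prev >= end;		/* region fully covered */
}

static void
prot_ranges_apply(
	struct prot_range	*head,
	unsigned long		start,
	unsigned long		end,
	unsigned int		new_prot)
{
	struct prot_range	*r;

	/* commit only after prot_ranges_allow() succeeded */
	for (r = head; r != NULL && r->start < end; r = r->next)
		r->prot = new_prot;
}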
2010
2011/*
2012 * vm_map_inherit:
2013 *
2014 * Sets the inheritance of the specified address
2015 * range in the target map. Inheritance
2016 * affects how the map will be shared with
2017 * child maps at the time of vm_map_fork.
2018 */
2019kern_return_t
2020vm_map_inherit(
2021 register vm_map_t map,
2022 register vm_offset_t start,
2023 register vm_offset_t end,
2024 register vm_inherit_t new_inheritance)
2025{
2026 register vm_map_entry_t entry;
2027 vm_map_entry_t temp_entry;
2028
2029 vm_map_lock(map);
2030
2031 VM_MAP_RANGE_CHECK(map, start, end);
2032
2033 if (vm_map_lookup_entry(map, start, &temp_entry)) {
2034 entry = temp_entry;
2035 vm_map_clip_start(map, entry, start);
2036 }
2037 else {
2038 temp_entry = temp_entry->vme_next;
2039 entry = temp_entry;
2040 }
2041
2042 /* first check entire range for submaps which can't support the */
2043 /* given inheritance. */
2044 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
 2045 if(entry->is_sub_map) {
 2046 if(new_inheritance == VM_INHERIT_COPY) {
 2047 vm_map_unlock(map); return(KERN_INVALID_ARGUMENT); }
 2048 }
2049
2050 entry = entry->vme_next;
2051 }
2052
2053 entry = temp_entry;
2054
2055 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
2056 vm_map_clip_end(map, entry, end);
2057
2058 entry->inheritance = new_inheritance;
2059
2060 entry = entry->vme_next;
2061 }
2062
2063 vm_map_unlock(map);
2064 return(KERN_SUCCESS);
2065}
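
/*
 * Caller-side sketch (kernel context assumed, not part of this file):
 * mark a range as VM_INHERIT_NONE so that a child created by
 * vm_map_fork will not receive it.  The map handle, address and size
 * are hypothetical; vm_map_inherit is the routine defined above and
 * trunc_page/round_page are the usual page-rounding macros.
 */
static kern_return_t
example_hide_from_children(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size)
{
	/*
	 * KERN_INVALID_ARGUMENT would come back if the range covered a
	 * submap and VM_INHERIT_COPY had been requested; VM_INHERIT_NONE
	 * is accepted for any entry type.
	 */
	return vm_map_inherit(map, trunc_page(start),
			      round_page(start + size),
			      VM_INHERIT_NONE);
}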
2066
2067/*
2068 * vm_map_wire:
2069 *
2070 * Sets the pageability of the specified address range in the
2071 * target map as wired. Regions specified as not pageable require
2072 * locked-down physical memory and physical page maps. The
2073 * access_type variable indicates types of accesses that must not
 2074 * generate page faults. This is checked against the protection of
 2075 * the memory being locked down.
2076 *
2077 * The map must not be locked, but a reference must remain to the
2078 * map throughout the call.
2079 */
2080kern_return_t
2081vm_map_wire_nested(
2082 register vm_map_t map,
2083 register vm_offset_t start,
2084 register vm_offset_t end,
2085 register vm_prot_t access_type,
2086 boolean_t user_wire,
2087 pmap_t map_pmap)
2088{
2089 register vm_map_entry_t entry;
2090 struct vm_map_entry *first_entry, tmp_entry;
2091 vm_map_t pmap_map;
2092 register vm_offset_t s,e;
2093 kern_return_t rc;
2094 boolean_t need_wakeup;
2095 boolean_t main_map = FALSE;
2096 boolean_t interruptible_state;
2097 thread_t cur_thread;
2098 unsigned int last_timestamp;
2099 vm_size_t size;
2100
2101 vm_map_lock(map);
2102 if(map_pmap == NULL)
2103 main_map = TRUE;
2104 last_timestamp = map->timestamp;
2105
2106 VM_MAP_RANGE_CHECK(map, start, end);
2107 assert(page_aligned(start));
2108 assert(page_aligned(end));
2109 if (start == end) {
2110 /* We wired what the caller asked for, zero pages */
2111 vm_map_unlock(map);
2112 return KERN_SUCCESS;
2113 }
2114
2115 if (vm_map_lookup_entry(map, start, &first_entry)) {
2116 entry = first_entry;
2117 /* vm_map_clip_start will be done later. */
2118 } else {
2119 /* Start address is not in map */
2120 vm_map_unlock(map);
2121 return(KERN_INVALID_ADDRESS);
2122 }
2123
2124 s=start;
2125 need_wakeup = FALSE;
 2126 cur_thread = current_thread();
2127 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
2128 /*
2129 * If another thread is wiring/unwiring this entry then
 2130 * block after informing the other thread to wake us up.
2131 */
2132 if (entry->in_transition) {
2133 /*
2134 * We have not clipped the entry. Make sure that
2135 * the start address is in range so that the lookup
2136 * below will succeed.
2137 */
2138 s = entry->vme_start < start? start: entry->vme_start;
2139
2140 entry->needs_wakeup = TRUE;
2141
2142 /*
2143 * wake up anybody waiting on entries that we have
2144 * already wired.
2145 */
2146 if (need_wakeup) {
2147 vm_map_entry_wakeup(map);
2148 need_wakeup = FALSE;
2149 }
2150 /*
2151 * User wiring is interruptible
2152 */
2153 vm_map_entry_wait(map,
2154 (user_wire) ? THREAD_ABORTSAFE :
2155 THREAD_UNINT);
 2156 if (user_wire && cur_thread->wait_result ==
2157 THREAD_INTERRUPTED) {
2158 /*
2159 * undo the wirings we have done so far
2160 * We do not clear the needs_wakeup flag,
2161 * because we cannot tell if we were the
2162 * only one waiting.
2163 */
2164 vm_map_unwire(map, start, s, user_wire);
2165 return(KERN_FAILURE);
2166 }
2167
2168 vm_map_lock(map);
2169 /*
 2170 * Cannot avoid a lookup here; reset the timestamp.
2171 */
2172 last_timestamp = map->timestamp;
2173
2174 /*
2175 * The entry could have been clipped, look it up again.
 2176 * The worst that can happen is that it no longer exists.
2177 */
2178 if (!vm_map_lookup_entry(map, s, &first_entry)) {
2179 if (!user_wire)
2180 panic("vm_map_wire: re-lookup failed");
2181
2182 /*
2183 * User: undo everything upto the previous
2184 * entry. let vm_map_unwire worry about
2185 * checking the validity of the range.
2186 */
2187 vm_map_unlock(map);
2188 vm_map_unwire(map, start, s, user_wire);
2189 return(KERN_FAILURE);
2190 }
2191 entry = first_entry;
2192 continue;
2193 }
2194
2195 if(entry->is_sub_map) {
2196 vm_offset_t sub_start;
2197 vm_offset_t sub_end;
2198 vm_offset_t local_end;
2199 pmap_t pmap;
2200
2201 vm_map_clip_start(map, entry, start);
2202 vm_map_clip_end(map, entry, end);
2203
 2204 sub_start = entry->offset;
2205 sub_end = entry->vme_end - entry->vme_start;
2206 sub_end += entry->offset;
2207
2208 local_end = entry->vme_end;
2209 if(map_pmap == NULL) {
2210 if(entry->use_pmap) {
2211 pmap = entry->object.sub_map->pmap;
2212 } else {
2213 pmap = map->pmap;
2214 }
2215 if (entry->wired_count) {
2216 if (entry->wired_count
2217 >= MAX_WIRE_COUNT)
2218 panic("vm_map_wire: too many wirings");
2219
2220 if (user_wire &&
2221 entry->user_wired_count
2222 >= MAX_WIRE_COUNT) {
2223 vm_map_unlock(map);
2224 vm_map_unwire(map, start,
2225 entry->vme_start, user_wire);
2226 return(KERN_FAILURE);
2227 }
2228 if (!user_wire ||
2229 (entry->user_wired_count++ == 0))
2230 entry->wired_count++;
2231 entry = entry->vme_next;
2232 continue;
2233
2234 } else {
2235 vm_object_t object;
2236 vm_object_offset_t offset_hi;
2237 vm_object_offset_t offset_lo;
2238 vm_object_offset_t offset;
2239 vm_prot_t prot;
2240 boolean_t wired;
2241 vm_behavior_t behavior;
2242 vm_offset_t local_start;
2243 vm_map_entry_t local_entry;
2244 vm_map_version_t version;
2245 vm_map_t lookup_map;
2246
2247 /* call vm_map_lookup_locked to */
2248 /* cause any needs copy to be */
2249 /* evaluated */
2250 local_start = entry->vme_start;
2251 lookup_map = map;
2252 vm_map_lock_write_to_read(map);
2253 if(vm_map_lookup_locked(
2254 &lookup_map, local_start,
2255 VM_PROT_WRITE,
2256 &version, &object,
2257 &offset, &prot, &wired,
2258 &behavior, &offset_lo,
2259 &offset_hi, &pmap_map)) {
2260
2261 vm_map_unlock(lookup_map);
2262 vm_map_unwire(map, start,
2263 entry->vme_start, user_wire);
2264 return(KERN_FAILURE);
2265 }
2266 if(pmap_map != lookup_map)
2267 vm_map_unlock(pmap_map);
2268 if(lookup_map != map) {
2269 vm_map_unlock(lookup_map);
2270 vm_map_lock(map);
2271 } else {
2272 vm_map_unlock(map);
2273 vm_map_lock(map);
2274 }
2275 last_timestamp =
2276 version.main_timestamp;
2277 vm_object_unlock(object);
 2278 if (!vm_map_lookup_entry(map,
2279 local_start, &local_entry)) {
2280 vm_map_unlock(map);
2281 vm_map_unwire(map, start,
2282 entry->vme_start, user_wire);
2283 return(KERN_FAILURE);
2284 }
2285 /* did we have a change of type? */
2286 if (!local_entry->is_sub_map)
2287 continue;
2288 entry = local_entry;
2289 if (user_wire)
2290 entry->user_wired_count++;
2291 entry->wired_count++;
2292
2293 entry->in_transition = TRUE;
2294
2295 vm_map_unlock(map);
2296 rc = vm_map_wire_nested(
2297 entry->object.sub_map,
2298 sub_start, sub_end,
2299 access_type,
2300 user_wire, pmap);
2301 vm_map_lock(map);
2302 last_timestamp = map->timestamp;
2303 }
2304 } else {
2305 vm_map_unlock(map);
2306 rc = vm_map_wire_nested(entry->object.sub_map,
2307 sub_start, sub_end,
2308 access_type,
 2309 user_wire, map_pmap);
2310 vm_map_lock(map);
2311 last_timestamp = map->timestamp;
2312 }
2313 s = entry->vme_start;
2314 e = entry->vme_end;
2315 if (last_timestamp+1 != map->timestamp) {
2316 /*
2317 * Find the entry again. It could have been clipped
2318 * after we unlocked the map.
2319 */
2320 if (!vm_map_lookup_entry(map, local_end,
2321 &first_entry))
2322 panic("vm_map_wire: re-lookup failed");
2323
2324 entry = first_entry;
2325 }
2326
2327 last_timestamp = map->timestamp;
2328 while ((entry != vm_map_to_entry(map)) &&
2329 (entry->vme_start < e)) {
2330 assert(entry->in_transition);
2331 entry->in_transition = FALSE;
2332 if (entry->needs_wakeup) {
2333 entry->needs_wakeup = FALSE;
2334 need_wakeup = TRUE;
2335 }
2336 if (rc != KERN_SUCCESS) {/* from vm_*_wire */
2337 if(main_map) {
2338 if (user_wire)
2339 entry->user_wired_count--;
2340 entry->wired_count--;
2341 }
2342 }
2343 entry = entry->vme_next;
2344 }
2345 if (rc != KERN_SUCCESS) { /* from vm_*_wire */
2346 vm_map_unlock(map);
2347 if (need_wakeup)
2348 vm_map_entry_wakeup(map);
2349 /*
 2350 * undo everything up to the previous entry.
2351 */
2352 (void)vm_map_unwire(map, start, s, user_wire);
2353 return rc;
2354 }
2355 continue;
2356 }
2357
2358 /*
2359 * If this entry is already wired then increment
2360 * the appropriate wire reference count.
2361 */
2362 if (entry->wired_count && main_map) {
2363 /* sanity check: wired_count is a short */
2364 if (entry->wired_count >= MAX_WIRE_COUNT)
2365 panic("vm_map_wire: too many wirings");
2366
2367 if (user_wire &&
2368 entry->user_wired_count >= MAX_WIRE_COUNT) {
2369 vm_map_unlock(map);
2370 vm_map_unwire(map, start,
2371 entry->vme_start, user_wire);
2372 return(KERN_FAILURE);
2373 }
2374 /*
2375 * entry is already wired down, get our reference
2376 * after clipping to our range.
2377 */
2378 vm_map_clip_start(map, entry, start);
2379 vm_map_clip_end(map, entry, end);
2380 if (!user_wire || (entry->user_wired_count++ == 0))
2381 entry->wired_count++;
2382
2383 entry = entry->vme_next;
2384 continue;
2385 }
2386
2387 /*
2388 * Unwired entry or wire request transmitted via submap
2389 */
2390
2391
2392 /*
2393 * Perform actions of vm_map_lookup that need the write
2394 * lock on the map: create a shadow object for a
2395 * copy-on-write region, or an object for a zero-fill
2396 * region.
2397 */
2398 size = entry->vme_end - entry->vme_start;
2399 /*
2400 * If wiring a copy-on-write page, we need to copy it now
2401 * even if we're only (currently) requesting read access.
2402 * This is aggressive, but once it's wired we can't move it.
2403 */
2404 if (entry->needs_copy) {
2405 vm_object_shadow(&entry->object.vm_object,
2406 &entry->offset, size);
2407 entry->needs_copy = FALSE;
2408 } else if (entry->object.vm_object == VM_OBJECT_NULL) {
2409 entry->object.vm_object = vm_object_allocate(size);
2410 entry->offset = (vm_object_offset_t)0;
2411 }
2412
2413 vm_map_clip_start(map, entry, start);
2414 vm_map_clip_end(map, entry, end);
2415
2416 s = entry->vme_start;
2417 e = entry->vme_end;
2418
2419 /*
2420 * Check for holes and protection mismatch.
2421 * Holes: Next entry should be contiguous unless this
2422 * is the end of the region.
2423 * Protection: Access requested must be allowed, unless
2424 * wiring is by protection class
2425 */
2426 if ((((entry->vme_end < end) &&
2427 ((entry->vme_next == vm_map_to_entry(map)) ||
2428 (entry->vme_next->vme_start > entry->vme_end))) ||
2429 ((entry->protection & access_type) != access_type))) {
2430 /*
2431 * Found a hole or protection problem.
2432 * Unwire the region we wired so far.
2433 */
2434 if (start != entry->vme_start) {
2435 vm_map_unlock(map);
2436 vm_map_unwire(map, start, s, user_wire);
2437 } else {
2438 vm_map_unlock(map);
2439 }
2440 return((entry->protection&access_type) != access_type?
2441 KERN_PROTECTION_FAILURE: KERN_INVALID_ADDRESS);
2442 }
2443
2444 assert(entry->wired_count == 0 && entry->user_wired_count == 0);
2445
2446 if(main_map) {
2447 if (user_wire)
2448 entry->user_wired_count++;
2449 entry->wired_count++;
2450 }
2451
2452 entry->in_transition = TRUE;
2453
2454 /*
2455 * This entry might get split once we unlock the map.
2456 * In vm_fault_wire(), we need the current range as
2457 * defined by this entry. In order for this to work
2458 * along with a simultaneous clip operation, we make a
2459 * temporary copy of this entry and use that for the
2460 * wiring. Note that the underlying objects do not
2461 * change during a clip.
2462 */
2463 tmp_entry = *entry;
2464
2465 /*
 2466 * The in_transition state guarantees that the entry
 2467 * (or entries for this range, if a split occurred) will be
2468 * there when the map lock is acquired for the second time.
2469 */
2470 vm_map_unlock(map);
2471
2472 if (!user_wire && cur_thread != THREAD_NULL) {
2473 interruptible_state = cur_thread->interruptible;
2474 cur_thread->interruptible = FALSE;
2475 }
2476
2477 if(map_pmap)
2478 rc = vm_fault_wire(map, &tmp_entry, map_pmap);
2479 else
2480 rc = vm_fault_wire(map, &tmp_entry, map->pmap);
2481
2482 if (!user_wire && cur_thread != THREAD_NULL)
2483 cur_thread->interruptible = interruptible_state;
2484
2485 vm_map_lock(map);
2486
2487 if (last_timestamp+1 != map->timestamp) {
2488 /*
2489 * Find the entry again. It could have been clipped
2490 * after we unlocked the map.
2491 */
2492 if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
2493 &first_entry))
2494 panic("vm_map_wire: re-lookup failed");
2495
2496 entry = first_entry;
2497 }
2498
2499 last_timestamp = map->timestamp;
2500
2501 while ((entry != vm_map_to_entry(map)) &&
2502 (entry->vme_start < tmp_entry.vme_end)) {
2503 assert(entry->in_transition);
2504 entry->in_transition = FALSE;
2505 if (entry->needs_wakeup) {
2506 entry->needs_wakeup = FALSE;
2507 need_wakeup = TRUE;
2508 }
2509 if (rc != KERN_SUCCESS) { /* from vm_*_wire */
2510 if(main_map) {
2511 if (user_wire)
2512 entry->user_wired_count--;
2513 entry->wired_count--;
2514 }
2515 }
2516 entry = entry->vme_next;
2517 }
2518
2519 if (rc != KERN_SUCCESS) { /* from vm_*_wire */
2520 vm_map_unlock(map);
2521 if (need_wakeup)
2522 vm_map_entry_wakeup(map);
2523 /*
 2524 * undo everything up to the previous entry.
2525 */
2526 (void)vm_map_unwire(map, start, s, user_wire);
2527 return rc;
2528 }
2529 } /* end while loop through map entries */
2530 vm_map_unlock(map);
2531
2532 /*
2533 * wake up anybody waiting on entries we wired.
2534 */
2535 if (need_wakeup)
2536 vm_map_entry_wakeup(map);
2537
2538 return(KERN_SUCCESS);
2539
2540}
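
/*
 * Sketch (kernel context assumed, not part of this file) of the
 * drop-lock/re-validate pattern used repeatedly in vm_map_wire_nested
 * above: snapshot the entry and the map timestamp, unlock, do the
 * blocking work against the snapshot, relock, and re-lookup the entry
 * if the timestamp shows that anyone else touched the map in between.
 * The blocking work itself is elided here.
 */
static vm_map_entry_t
example_relookup_pattern(
	vm_map_t	map,
	vm_map_entry_t	entry)
{
	struct vm_map_entry	tmp_entry;
	vm_map_entry_t		first_entry;
	unsigned int		last_timestamp;

	tmp_entry = *entry;		/* survives clipping of the original */
	last_timestamp = map->timestamp;
	vm_map_unlock(map);

	/* ... blocking work against tmp_entry (e.g. a fault) goes here ... */

	vm_map_lock(map);
	if (last_timestamp + 1 != map->timestamp) {
		/* the map changed while unlocked; find the entry again */
		if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
					 &first_entry))
			return VM_MAP_ENTRY_NULL; /* gone; caller recovers */
		entry = first_entry;
	}
	return entry;
}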
2541
2542kern_return_t
2543vm_map_wire(
2544 register vm_map_t map,
2545 register vm_offset_t start,
2546 register vm_offset_t end,
2547 register vm_prot_t access_type,
2548 boolean_t user_wire)
2549{
2550
2551 kern_return_t kret;
2552
2553#ifdef ppc
2554 /*
2555 * the calls to mapping_prealloc and mapping_relpre
 2556 * (along with the VM_MAP_RANGE_CHECK to ensure a
 2557 * reasonable range was passed in) are
2558 * currently necessary because
2559 * we haven't enabled kernel pre-emption
2560 * and/or the pmap_enter cannot purge and re-use
2561 * existing mappings
2562 */
2563 VM_MAP_RANGE_CHECK(map, start, end);
2564 mapping_prealloc(end - start);
2565#endif
2566 kret = vm_map_wire_nested(map, start, end, access_type,
2567 user_wire, (pmap_t)NULL);
2568#ifdef ppc
2569 mapping_relpre();
2570#endif
2571 return kret;
2572}
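
/*
 * Caller-side sketch (kernel context assumed, not part of this file):
 * wire a user buffer for an I/O-style operation and unwire it when
 * done, the pairing the comments above describe.  The address and
 * length are hypothetical; vm_map_wire/vm_map_unwire are the routines
 * defined in this file.
 */
static kern_return_t
example_wire_for_io(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	len)
{
	vm_offset_t	start = trunc_page(addr);
	vm_offset_t	end = round_page(addr + len);
	kern_return_t	kr;

	/* user_wire == TRUE: per-request accounting, interruptible wait */
	kr = vm_map_wire(map, start, end,
			 VM_PROT_READ | VM_PROT_WRITE, TRUE);
	if (kr != KERN_SUCCESS)
		return kr;	/* hole, protection mismatch, or interrupted */

	/* ... touch the now-wired pages here without faulting ... */

	return vm_map_unwire(map, start, end, TRUE);
}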
2573
2574/*
2575 * vm_map_unwire:
2576 *
2577 * Sets the pageability of the specified address range in the target
 2578 * map as pageable. Regions specified must have been wired previously.
2579 *
2580 * The map must not be locked, but a reference must remain to the map
2581 * throughout the call.
2582 *
 2583 * The kernel will panic on failures. User unwire ignores holes and
 2584 * unwired and in-transition entries to avoid losing memory by leaving
2585 * it unwired.
2586 */
2587kern_return_t
2588vm_map_unwire_nested(
2589 register vm_map_t map,
2590 register vm_offset_t start,
2591 register vm_offset_t end,
2592 boolean_t user_wire,
2593 pmap_t map_pmap)
2594{
2595 register vm_map_entry_t entry;
2596 struct vm_map_entry *first_entry, tmp_entry;
2597 boolean_t need_wakeup;
2598 boolean_t main_map = FALSE;
2599 unsigned int last_timestamp;
2600
2601 vm_map_lock(map);
2602 if(map_pmap == NULL)
2603 main_map = TRUE;
2604 last_timestamp = map->timestamp;
2605
2606 VM_MAP_RANGE_CHECK(map, start, end);
2607 assert(page_aligned(start));
2608 assert(page_aligned(end));
2609
2610 if (vm_map_lookup_entry(map, start, &first_entry)) {
2611 entry = first_entry;
2612 /* vm_map_clip_start will be done later. */
2613 }
2614 else {
2615 /* Start address is not in map. */
2616 vm_map_unlock(map);
2617 return(KERN_INVALID_ADDRESS);
2618 }
2619
2620 need_wakeup = FALSE;
2621 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
2622 if (entry->in_transition) {
2623 /*
2624 * 1)
2625 * Another thread is wiring down this entry. Note
 2626 * that if it were not for the other thread, we would
 2627 * be unwiring an unwired entry. This is not
2628 * permitted. If we wait, we will be unwiring memory
2629 * we did not wire.
2630 *
2631 * 2)
2632 * Another thread is unwiring this entry. We did not
2633 * have a reference to it, because if we did, this
 2634 * entry would not be getting unwired now.
2635 */
2636 if (!user_wire)
2637 panic("vm_map_unwire: in_transition entry");
2638
2639 entry = entry->vme_next;
2640 continue;
2641 }
2642
2643 if(entry->is_sub_map) {
2644 vm_offset_t sub_start;
2645 vm_offset_t sub_end;
2646 vm_offset_t local_end;
2647 pmap_t pmap;
2648
2649
2650 vm_map_clip_start(map, entry, start);
2651 vm_map_clip_end(map, entry, end);
2652
2653 sub_start = entry->offset;
2654 sub_end = entry->vme_end - entry->vme_start;
2655 sub_end += entry->offset;
2656 local_end = entry->vme_end;
2657 if(map_pmap == NULL) {
2658 if(entry->use_pmap) {
2659 pmap = entry->object.sub_map->pmap;
2660 } else {
2661 pmap = map->pmap;
2662 }
2663 if (entry->wired_count == 0 ||
2664 (user_wire && entry->user_wired_count == 0)) {
2665 if (!user_wire)
2666 panic("vm_map_unwire: entry is unwired");
2667 entry = entry->vme_next;
2668 continue;
2669 }
2670
2671 /*
2672 * Check for holes
2673 * Holes: Next entry should be contiguous unless
2674 * this is the end of the region.
2675 */
2676 if (((entry->vme_end < end) &&
2677 ((entry->vme_next == vm_map_to_entry(map)) ||
2678 (entry->vme_next->vme_start
2679 > entry->vme_end)))) {
2680 if (!user_wire)
2681 panic("vm_map_unwire: non-contiguous region");
2682/*
2683 entry = entry->vme_next;
2684 continue;
2685*/
2686 }
2687
2688 if (!user_wire || (--entry->user_wired_count == 0))
2689 entry->wired_count--;
2690
2691 if (entry->wired_count != 0) {
2692 entry = entry->vme_next;
2693 continue;
2694 }
2695
2696 entry->in_transition = TRUE;
2697 tmp_entry = *entry;/* see comment in vm_map_wire() */
2698
2699 /*
2700 * We can unlock the map now. The in_transition state
2701 * guarantees existance of the entry.
2702 */
2703 vm_map_unlock(map);
2704 vm_map_unwire_nested(entry->object.sub_map,
2705 sub_start, sub_end, user_wire, pmap);
2706 vm_map_lock(map);
2707
2708 if (last_timestamp+1 != map->timestamp) {
2709 /*
2710 * Find the entry again. It could have been
2711 * clipped or deleted after we unlocked the map.
2712 */
2713 if (!vm_map_lookup_entry(map,
2714 tmp_entry.vme_start,
2715 &first_entry)) {
2716 if (!user_wire)
2717 panic("vm_map_unwire: re-lookup failed");
2718 entry = first_entry->vme_next;
2719 } else
2720 entry = first_entry;
2721 }
2722 last_timestamp = map->timestamp;
2723
2724 /*
2725 * clear transition bit for all constituent entries
2726 * that were in the original entry (saved in
2727 * tmp_entry). Also check for waiters.
2728 */
2729 while ((entry != vm_map_to_entry(map)) &&
2730 (entry->vme_start < tmp_entry.vme_end)) {
2731 assert(entry->in_transition);
2732 entry->in_transition = FALSE;
2733 if (entry->needs_wakeup) {
2734 entry->needs_wakeup = FALSE;
2735 need_wakeup = TRUE;
2736 }
2737 entry = entry->vme_next;
2738 }
2739 continue;
2740 } else {
2741 vm_map_unlock(map);
2742 vm_map_unwire_nested(entry->object.sub_map,
 2743 sub_start, sub_end, user_wire, map_pmap);
2744 vm_map_lock(map);
2745
2746 if (last_timestamp+1 != map->timestamp) {
2747 /*
2748 * Find the entry again. It could have been
2749 * clipped or deleted after we unlocked the map.
2750 */
2751 if (!vm_map_lookup_entry(map,
2752 tmp_entry.vme_start,
2753 &first_entry)) {
2754 if (!user_wire)
2755 panic("vm_map_unwire: re-lookup failed");
2756 entry = first_entry->vme_next;
2757 } else
2758 entry = first_entry;
2759 }
2760 last_timestamp = map->timestamp;
2761 }
2762 }
2763
2764
2765 if (main_map && (entry->wired_count == 0 ||
2766 (user_wire && entry->user_wired_count == 0))) {
2767 if (!user_wire)
2768 panic("vm_map_unwire: entry is unwired");
2769
2770 entry = entry->vme_next;
2771 continue;
2772 }
2773
2774 assert(entry->wired_count > 0 &&
2775 (!user_wire || entry->user_wired_count > 0));
2776
2777 vm_map_clip_start(map, entry, start);
2778 vm_map_clip_end(map, entry, end);
2779
2780 /*
2781 * Check for holes
2782 * Holes: Next entry should be contiguous unless
2783 * this is the end of the region.
2784 */
2785 if (((entry->vme_end < end) &&
2786 ((entry->vme_next == vm_map_to_entry(map)) ||
2787 (entry->vme_next->vme_start > entry->vme_end)))) {
2788
2789 if (!user_wire)
2790 panic("vm_map_unwire: non-contiguous region");
2791 entry = entry->vme_next;
2792 continue;
2793 }
2794
2795 if(main_map) {
2796 if (!user_wire || (--entry->user_wired_count == 0))
2797 entry->wired_count--;
2798
2799 if (entry->wired_count != 0) {
2800 entry = entry->vme_next;
2801 continue;
2802 }
2803 }
2804
2805 entry->in_transition = TRUE;
2806 tmp_entry = *entry; /* see comment in vm_map_wire() */
2807
2808 /*
2809 * We can unlock the map now. The in_transition state
 2810 * guarantees existence of the entry.
2811 */
2812 vm_map_unlock(map);
2813 if(map_pmap) {
2814 vm_fault_unwire(map, &tmp_entry, FALSE, map_pmap);
2815 } else {
2816 vm_fault_unwire(map, &tmp_entry, FALSE, map->pmap);
2817 }
2818 vm_map_lock(map);
2819
2820 if (last_timestamp+1 != map->timestamp) {
2821 /*
2822 * Find the entry again. It could have been clipped
2823 * or deleted after we unlocked the map.
2824 */
2825 if (!vm_map_lookup_entry(map, tmp_entry.vme_start,
2826 &first_entry)) {
2827 if (!user_wire)
2828 panic("vm_map_unwire: re-lookup failed");
2829 entry = first_entry->vme_next;
2830 } else
2831 entry = first_entry;
2832 }
2833 last_timestamp = map->timestamp;
2834
2835 /*
2836 * clear transition bit for all constituent entries that
2837 * were in the original entry (saved in tmp_entry). Also
2838 * check for waiters.
2839 */
2840 while ((entry != vm_map_to_entry(map)) &&
2841 (entry->vme_start < tmp_entry.vme_end)) {
2842 assert(entry->in_transition);
2843 entry->in_transition = FALSE;
2844 if (entry->needs_wakeup) {
2845 entry->needs_wakeup = FALSE;
2846 need_wakeup = TRUE;
2847 }
2848 entry = entry->vme_next;
2849 }
2850 }
2851 vm_map_unlock(map);
2852 /*
2853 * wake up anybody waiting on entries that we have unwired.
2854 */
2855 if (need_wakeup)
2856 vm_map_entry_wakeup(map);
2857 return(KERN_SUCCESS);
2858
2859}
2860
2861kern_return_t
2862vm_map_unwire(
2863 register vm_map_t map,
2864 register vm_offset_t start,
2865 register vm_offset_t end,
2866 boolean_t user_wire)
2867{
2868 return vm_map_unwire_nested(map, start, end, user_wire, (pmap_t)NULL);
2869}
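
/*
 * Illustrative sketch only (not part of this file) of the wire-count
 * bookkeeping the routines above maintain: for user wirings,
 * wired_count is bumped only on the 0 -> 1 transition of
 * user_wired_count and dropped only on the 1 -> 0 transition, so any
 * number of user wire requests still count as one wiring against the
 * entry.  The struct below is a hypothetical stand-in for the relevant
 * vm_map_entry fields.
 */
struct wire_counts {
	unsigned short	wired_count;
	unsigned short	user_wired_count;
};

static void
example_note_wire(struct wire_counts *c, int user_wire)
{
	if (!user_wire || (c->user_wired_count++ == 0))
		c->wired_count++;
}

static void
example_note_unwire(struct wire_counts *c, int user_wire)
{
	if (!user_wire || (--c->user_wired_count == 0))
		c->wired_count--;
}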
2870
2871
2872/*
2873 * vm_map_entry_delete: [ internal use only ]
2874 *
2875 * Deallocate the given entry from the target map.
2876 */
2877void
2878vm_map_entry_delete(
2879 register vm_map_t map,
2880 register vm_map_entry_t entry)
2881{
2882 register vm_offset_t s, e;
2883 register vm_object_t object;
2884 register vm_map_t submap;
2885 extern vm_object_t kernel_object;
2886
2887 s = entry->vme_start;
2888 e = entry->vme_end;
2889 assert(page_aligned(s));
2890 assert(page_aligned(e));
2891 assert(entry->wired_count == 0);
2892 assert(entry->user_wired_count == 0);
2893
2894 if (entry->is_sub_map) {
2895 object = NULL;
2896 submap = entry->object.sub_map;
2897 } else {
2898 submap = NULL;
2899 object = entry->object.vm_object;
2900 }
2901
2902 vm_map_entry_unlink(map, entry);
2903 map->size -= e - s;
2904
2905 vm_map_entry_dispose(map, entry);
2906
2907 vm_map_unlock(map);
2908 /*
2909 * Deallocate the object only after removing all
2910 * pmap entries pointing to its pages.
2911 */
2912 if (submap)
2913 vm_map_deallocate(submap);
2914 else
2915 vm_object_deallocate(object);
2916
2917}
2918
2919void
2920vm_map_submap_pmap_clean(
2921 vm_map_t map,
2922 vm_offset_t start,
2923 vm_offset_t end,
2924 vm_map_t sub_map,
2925 vm_offset_t offset)
2926{
2927 vm_offset_t submap_start;
2928 vm_offset_t submap_end;
2929 vm_offset_t addr;
2930 vm_size_t remove_size;
2931 vm_map_entry_t entry;
2932
2933 submap_end = offset + (end - start);
2934 submap_start = offset;
2935 if(vm_map_lookup_entry(sub_map, offset, &entry)) {
2936
2937 remove_size = (entry->vme_end - entry->vme_start);
2938 if(offset > entry->vme_start)
2939 remove_size -= offset - entry->vme_start;
2940
2941
2942 if(submap_end < entry->vme_end) {
2943 remove_size -=
2944 entry->vme_end - submap_end;
2945 }
2946 if(entry->is_sub_map) {
2947 vm_map_submap_pmap_clean(
2948 sub_map,
2949 start,
2950 start + remove_size,
2951 entry->object.sub_map,
2952 entry->offset);
2953 } else {
2954 pmap_remove(map->pmap, start, start + remove_size);
2955 }
2956 }
2957
2958 entry = entry->vme_next;
2959
2960 while((entry != vm_map_to_entry(sub_map))
2961 && (entry->vme_start < submap_end)) {
2962 remove_size = (entry->vme_end - entry->vme_start);
2963 if(submap_end < entry->vme_end) {
2964 remove_size -= entry->vme_end - submap_end;
2965 }
2966 if(entry->is_sub_map) {
2967 vm_map_submap_pmap_clean(
2968 sub_map,
2969 (start + entry->vme_start) - offset,
2970 ((start + entry->vme_start) - offset) + remove_size,
2971 entry->object.sub_map,
2972 entry->offset);
2973 } else {
2974 pmap_remove(map->pmap,
2975 (start + entry->vme_start) - offset,
2976 ((start + entry->vme_start) - offset) + remove_size);
2977 }
2978 entry = entry->vme_next;
2979 }
2980 return;
2981}
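
/*
 * Illustrative sketch only (not part of this file) of the arithmetic
 * vm_map_submap_pmap_clean uses above: clip a submap entry to the
 * window [offset, offset + (end - start)) and translate its start back
 * into a parent-map address for pmap_remove.  Plain unsigned longs
 * stand in for vm_offset_t/vm_size_t, and the entry bounds are assumed
 * to intersect the window.
 */
struct clean_range {
	unsigned long	parent_start;	/* address to remove in parent pmap */
	unsigned long	size;		/* number of bytes to remove */
};

static struct clean_range
example_clip_submap_entry(
	unsigned long	entry_start,	/* entry->vme_start (submap space) */
	unsigned long	entry_end,	/* entry->vme_end (submap space) */
	unsigned long	start,		/* parent-map start of the region */
	unsigned long	offset,		/* submap offset corresponding to start */
	unsigned long	submap_end)	/* offset + (end - start) */
{
	struct clean_range	r;
	unsigned long		size = entry_end - entry_start;

	if (submap_end < entry_end)	/* entry extends past the window */
		size -= entry_end - submap_end;

	r.parent_start = (start + entry_start) - offset;
	r.size = size;
	return r;
}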
2982
2983/*
2984 * vm_map_delete: [ internal use only ]
2985 *
2986 * Deallocates the given address range from the target map.
2987 * Removes all user wirings. Unwires one kernel wiring if
2988 * VM_MAP_REMOVE_KUNWIRE is set. Waits for kernel wirings to go
2989 * away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set. Sleeps
2990 * interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
2991 *
2992 * This routine is called with map locked and leaves map locked.
2993 */
2994kern_return_t
2995vm_map_delete(
2996 register vm_map_t map,
2997 vm_offset_t start,
2998 register vm_offset_t end,
2999 int flags)
3000{
3001 vm_map_entry_t entry, next;
3002 struct vm_map_entry *first_entry, tmp_entry;
3003 register vm_offset_t s, e;
3004 register vm_object_t object;
3005 boolean_t need_wakeup;
3006 unsigned int last_timestamp = ~0; /* unlikely value */
3007 int interruptible;
3008 extern vm_map_t kernel_map;
3009
3010 interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ?
3011 THREAD_ABORTSAFE : THREAD_UNINT;
3012
3013 /*
3014 * All our DMA I/O operations in IOKit are currently done by
3015 * wiring through the map entries of the task requesting the I/O.
3016 * Because of this, we must always wait for kernel wirings
3017 * to go away on the entries before deleting them.
3018 *
3019 * Any caller who wants to actually remove a kernel wiring
3020 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
3021 * properly remove one wiring instead of blasting through
3022 * them all.
3023 */
3024 flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE;
3025
3026 /*
3027 * Find the start of the region, and clip it
3028 */
3029 if (vm_map_lookup_entry(map, start, &first_entry)) {
3030 entry = first_entry;
3031 vm_map_clip_start(map, entry, start);
3032
3033 /*
3034 * Fix the lookup hint now, rather than each
3035 * time through the loop.
3036 */
3037 SAVE_HINT(map, entry->vme_prev);
3038 } else {
3039 entry = first_entry->vme_next;
3040 }
3041
3042 need_wakeup = FALSE;
3043 /*
3044 * Step through all entries in this region
3045 */
3046 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
3047
3048 vm_map_clip_end(map, entry, end);
3049 if (entry->in_transition) {
3050 /*
3051 * Another thread is wiring/unwiring this entry.
3052 * Let the other thread know we are waiting.
3053 */
3054 s = entry->vme_start;
3055 entry->needs_wakeup = TRUE;
3056
3057 /*
3058 * wake up anybody waiting on entries that we have
3059 * already unwired/deleted.
3060 */
3061 if (need_wakeup) {
3062 vm_map_entry_wakeup(map);
3063 need_wakeup = FALSE;
3064 }
3065
3066 vm_map_entry_wait(map, interruptible);
3067
3068 if (interruptible &&
3069 current_thread()->wait_result == THREAD_INTERRUPTED)
3070 /*
3071 * We do not clear the needs_wakeup flag,
3072 * since we cannot tell if we were the only one.
3073 */
3074 return KERN_ABORTED;
3075
3076 vm_map_lock(map);
3077 /*
 3078 * Cannot avoid a lookup here; reset the timestamp.
3079 */
3080 last_timestamp = map->timestamp;
3081
3082 /*
3083 * The entry could have been clipped or it
3084 * may not exist anymore. Look it up again.
3085 */
3086 if (!vm_map_lookup_entry(map, s, &first_entry)) {
3087 assert((map != kernel_map) &&
3088 (!entry->is_sub_map));
3089 /*
3090 * User: use the next entry
3091 */
3092 entry = first_entry->vme_next;
3093 } else {
3094 entry = first_entry;
3095 SAVE_HINT(map, entry->vme_prev);
3096 }
3097 continue;
3098 } /* end in_transition */
3099
3100 if (entry->wired_count) {
3101 /*
3102 * Remove a kernel wiring if requested or if
3103 * there are user wirings.
3104 */
3105 if ((flags & VM_MAP_REMOVE_KUNWIRE) ||
3106 (entry->user_wired_count > 0))
3107 entry->wired_count--;
3108
3109 /* remove all user wire references */
3110 entry->user_wired_count = 0;
3111
3112 if (entry->wired_count != 0) {
3113 assert((map != kernel_map) &&
3114 (!entry->is_sub_map));
3115 /*
3116 * Cannot continue. Typical case is when
 3117 * a user thread has physical I/O pending
 3118 * on this page. Either wait for the
3119 * kernel wiring to go away or return an
3120 * error.
3121 */
3122 if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) {
3123
3124 s = entry->vme_start;
3125 entry->needs_wakeup = TRUE;
3126 vm_map_entry_wait(map, interruptible);
3127
3128 if (interruptible &&
3129 current_thread()->wait_result ==
3130 THREAD_INTERRUPTED)
3131 /*
3132 * We do not clear the
3133 * needs_wakeup flag, since we
3134 * cannot tell if we were the
3135 * only one.
3136 */
3137 return KERN_ABORTED;
3138
3139 vm_map_lock(map);
3140 /*
 3141 * Cannot avoid a lookup here; reset
 3142 * the timestamp.
3143 */
3144 last_timestamp = map->timestamp;
3145
3146 /*
3147 * The entry could have been clipped or
3148 * it may not exist anymore. Look it
3149 * up again.
3150 */
3151 if (!vm_map_lookup_entry(map, s,
3152 &first_entry)) {
3153 assert((map != kernel_map) &&
3154 (!entry->is_sub_map));
3155 /*
3156 * User: use the next entry
3157 */
3158 entry = first_entry->vme_next;
3159 } else {
3160 entry = first_entry;
3161 SAVE_HINT(map, entry->vme_prev);
3162 }
3163 continue;
3164 }
3165 else {
3166 return KERN_FAILURE;
3167 }
3168 }
3169
3170 entry->in_transition = TRUE;
3171 /*
3172 * copy current entry. see comment in vm_map_wire()
3173 */
3174 tmp_entry = *entry;
3175 s = entry->vme_start;
3176 e = entry->vme_end;
3177
3178 /*
3179 * We can unlock the map now. The in_transition
 3180 * state guarantees existence of the entry.
3181 */
3182 vm_map_unlock(map);
3183 vm_fault_unwire(map, &tmp_entry,
3184 tmp_entry.object.vm_object == kernel_object,
3185 map->pmap);
3186 vm_map_lock(map);
3187
3188 if (last_timestamp+1 != map->timestamp) {
3189 /*
3190 * Find the entry again. It could have
3191 * been clipped after we unlocked the map.
3192 */
3193 if (!vm_map_lookup_entry(map, s, &first_entry)){
3194 assert((map != kernel_map) &&
3195 (!entry->is_sub_map));
3196 first_entry = first_entry->vme_next;
3197 } else {
3198 SAVE_HINT(map, entry->vme_prev);
3199 }
3200 } else {
3201 SAVE_HINT(map, entry->vme_prev);
3202 first_entry = entry;
3203 }
3204
3205 last_timestamp = map->timestamp;
3206
3207 entry = first_entry;
3208 while ((entry != vm_map_to_entry(map)) &&
3209 (entry->vme_start < tmp_entry.vme_end)) {
3210 assert(entry->in_transition);
3211 entry->in_transition = FALSE;
3212 if (entry->needs_wakeup) {
3213 entry->needs_wakeup = FALSE;
3214 need_wakeup = TRUE;
3215 }
3216 entry = entry->vme_next;
3217 }
3218 /*
3219 * We have unwired the entry(s). Go back and
3220 * delete them.
3221 */
3222 entry = first_entry;
3223 continue;
3224 }
3225
3226 /* entry is unwired */
3227 assert(entry->wired_count == 0);
3228 assert(entry->user_wired_count == 0);
3229
3230 if ((!entry->is_sub_map &&
3231 entry->object.vm_object != kernel_object) ||
3232 entry->is_sub_map) {
3233 if(entry->is_sub_map) {
3234 if(entry->use_pmap) {
3235#ifndef i386
3236 pmap_unnest(map->pmap, entry->vme_start,
3237 entry->vme_end - entry->vme_start);
3238#endif
3239 } else {
3240 vm_map_submap_pmap_clean(
3241 map, entry->vme_start, entry->vme_end,
3242 entry->object.sub_map,
3243 entry->offset);
3244 }
3245 } else {
3246 pmap_remove(map->pmap,
3247 entry->vme_start, entry->vme_end);
3248 }
3249 }
3250
3251 next = entry->vme_next;
3252 s = next->vme_start;
3253 last_timestamp = map->timestamp;
3254 vm_map_entry_delete(map, entry);
3255 /* vm_map_entry_delete unlocks the map */
3256 vm_map_lock(map);
3257 entry = next;
3258
3259 if(entry == vm_map_to_entry(map)) {
3260 break;
3261 }
3262 if (last_timestamp+1 != map->timestamp) {
3263 /*
 3264 * We are responsible for deleting everything
 3265 * from the given space. If someone has interfered,
 3266 * we pick up where we left off; back fills should
 3267 * be all right for anyone except map_delete, and
 3268 * we have to assume that the task has been fully
 3269 * disabled before we get here.
3270 */
3271 if (!vm_map_lookup_entry(map, s, &entry)){
3272 entry = entry->vme_next;
3273 } else {
3274 SAVE_HINT(map, entry->vme_prev);
3275 }
3276 /*
 3277 * Others can not only allocate behind us; we can
 3278 * also see coalescing while we don't hold the map lock.
3279 */
3280 if(entry == vm_map_to_entry(map)) {
3281 break;
3282 }
3283 vm_map_clip_start(map, entry, s);
3284 }
3285 last_timestamp = map->timestamp;
3286 }
3287
3288 if (map->wait_for_space)
3289 thread_wakeup((event_t) map);
3290 /*
3291 * wake up anybody waiting on entries that we have already deleted.
3292 */
3293 if (need_wakeup)
3294 vm_map_entry_wakeup(map);
3295
3296 return KERN_SUCCESS;
3297}
3298
3299/*
3300 * vm_map_remove:
3301 *
3302 * Remove the given address range from the target map.
3303 * This is the exported form of vm_map_delete.
3304 */
3305kern_return_t
3306vm_map_remove(
3307 register vm_map_t map,
3308 register vm_offset_t start,
3309 register vm_offset_t end,
3310 register boolean_t flags)
3311{
3312 register kern_return_t result;
3313
3314 vm_map_lock(map);
3315 VM_MAP_RANGE_CHECK(map, start, end);
3316 result = vm_map_delete(map, start, end, flags);
3317 vm_map_unlock(map);
3318
3319 return(result);
3320}
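
/*
 * Caller-side sketch (kernel context assumed, not part of this file):
 * remove a page-aligned range, releasing one kernel wiring the caller
 * owns and sleeping interruptibly while any other kernel wirings drain.
 * The flags are the ones interpreted by vm_map_delete above; the
 * address range is hypothetical.
 */
static kern_return_t
example_remove_range(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end)
{
	return vm_map_remove(map, trunc_page(start), round_page(end),
			     VM_MAP_REMOVE_KUNWIRE |
			     VM_MAP_REMOVE_INTERRUPTIBLE);
}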
3321
3322
3323/*
3324 * Routine: vm_map_copy_discard
3325 *
3326 * Description:
3327 * Dispose of a map copy object (returned by
3328 * vm_map_copyin).
3329 */
3330void
3331vm_map_copy_discard(
3332 vm_map_copy_t copy)
3333{
3334 TR_DECL("vm_map_copy_discard");
3335
3336/* tr3("enter: copy 0x%x type %d", copy, copy->type);*/
3337free_next_copy:
3338 if (copy == VM_MAP_COPY_NULL)
3339 return;
3340
3341 switch (copy->type) {
3342 case VM_MAP_COPY_ENTRY_LIST:
3343 while (vm_map_copy_first_entry(copy) !=
3344 vm_map_copy_to_entry(copy)) {
3345 vm_map_entry_t entry = vm_map_copy_first_entry(copy);
3346
3347 vm_map_copy_entry_unlink(copy, entry);
3348 vm_object_deallocate(entry->object.vm_object);
3349 vm_map_copy_entry_dispose(copy, entry);
3350 }
3351 break;
3352 case VM_MAP_COPY_OBJECT:
3353 vm_object_deallocate(copy->cpy_object);
3354 break;
3355 case VM_MAP_COPY_KERNEL_BUFFER:
3356
3357 /*
3358 * The vm_map_copy_t and possibly the data buffer were
3359 * allocated by a single call to kalloc(), i.e. the
3360 * vm_map_copy_t was not allocated out of the zone.
3361 */
3362 kfree((vm_offset_t) copy, copy->cpy_kalloc_size);
3363 return;
3364 }
3365 zfree(vm_map_copy_zone, (vm_offset_t) copy);
3366}
3367
3368/*
3369 * Routine: vm_map_copy_copy
3370 *
3371 * Description:
3372 * Move the information in a map copy object to
3373 * a new map copy object, leaving the old one
3374 * empty.
3375 *
3376 * This is used by kernel routines that need
3377 * to look at out-of-line data (in copyin form)
3378 * before deciding whether to return SUCCESS.
3379 * If the routine returns FAILURE, the original
3380 * copy object will be deallocated; therefore,
3381 * these routines must make a copy of the copy
3382 * object and leave the original empty so that
3383 * deallocation will not fail.
3384 */
3385vm_map_copy_t
3386vm_map_copy_copy(
3387 vm_map_copy_t copy)
3388{
3389 vm_map_copy_t new_copy;
3390
3391 if (copy == VM_MAP_COPY_NULL)
3392 return VM_MAP_COPY_NULL;
3393
3394 /*
3395 * Allocate a new copy object, and copy the information
3396 * from the old one into it.
3397 */
3398
3399 new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
3400 *new_copy = *copy;
3401
3402 if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
3403 /*
3404 * The links in the entry chain must be
3405 * changed to point to the new copy object.
3406 */
3407 vm_map_copy_first_entry(copy)->vme_prev
3408 = vm_map_copy_to_entry(new_copy);
3409 vm_map_copy_last_entry(copy)->vme_next
3410 = vm_map_copy_to_entry(new_copy);
3411 }
3412
3413 /*
3414 * Change the old copy object into one that contains
3415 * nothing to be deallocated.
3416 */
3417 copy->type = VM_MAP_COPY_OBJECT;
3418 copy->cpy_object = VM_OBJECT_NULL;
3419
3420 /*
3421 * Return the new object.
3422 */
3423 return new_copy;
3424}
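
/*
 * Caller-side sketch (kernel context assumed, not part of this file) of
 * the pattern the comment above describes: a routine that must inspect
 * out-of-line data before deciding success keeps its own copy object
 * via vm_map_copy_copy, leaving the original empty so the caller's
 * failure-path discard of the original cannot lose anything.
 * example_inspect() is a hypothetical predicate.
 */
extern boolean_t example_inspect(vm_map_copy_t copy);

static kern_return_t
example_consume_copy(
	vm_map_copy_t	copy)
{
	vm_map_copy_t	my_copy;

	my_copy = vm_map_copy_copy(copy);	/* original is now empty */

	if (!example_inspect(my_copy)) {
		/* we own the real data now, so we must discard it */
		vm_map_copy_discard(my_copy);
		return KERN_FAILURE;
	}

	/* ... hand my_copy on for further processing ... */
	return KERN_SUCCESS;
}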
3425
3426kern_return_t
3427vm_map_overwrite_submap_recurse(
3428 vm_map_t dst_map,
3429 vm_offset_t dst_addr,
3430 vm_size_t dst_size)
3431{
3432 vm_offset_t dst_end;
3433 vm_map_entry_t tmp_entry;
3434 vm_map_entry_t entry;
3435 kern_return_t result;
3436 boolean_t encountered_sub_map = FALSE;
3437
3438
3439
3440 /*
3441 * Verify that the destination is all writeable
3442 * initially. We have to trunc the destination
3443 * address and round the copy size or we'll end up
3444 * splitting entries in strange ways.
3445 */
3446
3447 dst_end = round_page(dst_addr + dst_size);
3448
3449start_pass_1:
3450 vm_map_lock(dst_map);
3451 if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
3452 vm_map_unlock(dst_map);
3453 return(KERN_INVALID_ADDRESS);
3454 }
3455
3456 vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
3457
3458 for (entry = tmp_entry;;) {
3459 vm_map_entry_t next;
3460
3461 next = entry->vme_next;
3462 while(entry->is_sub_map) {
3463 vm_offset_t sub_start;
3464 vm_offset_t sub_end;
3465 vm_offset_t local_end;
3466
3467 if (entry->in_transition) {
3468 /*
3469 * Say that we are waiting, and wait for entry.
3470 */
3471 entry->needs_wakeup = TRUE;
3472 vm_map_entry_wait(dst_map, THREAD_UNINT);
3473
3474 goto start_pass_1;
3475 }
3476
3477 encountered_sub_map = TRUE;
3478 sub_start = entry->offset;
3479
3480 if(entry->vme_end < dst_end)
3481 sub_end = entry->vme_end;
3482 else
3483 sub_end = dst_end;
3484 sub_end -= entry->vme_start;
3485 sub_end += entry->offset;
3486 local_end = entry->vme_end;
3487 vm_map_unlock(dst_map);
3488
3489 result = vm_map_overwrite_submap_recurse(
3490 entry->object.sub_map,
3491 sub_start,
3492 sub_end - sub_start);
3493
3494 if(result != KERN_SUCCESS)
3495 return result;
3496 if (dst_end <= entry->vme_end)
3497 return KERN_SUCCESS;
3498 vm_map_lock(dst_map);
3499 if(!vm_map_lookup_entry(dst_map, local_end,
3500 &tmp_entry)) {
3501 vm_map_unlock(dst_map);
3502 return(KERN_INVALID_ADDRESS);
3503 }
3504 entry = tmp_entry;
3505 next = entry->vme_next;
3506 }
3507
3508 if ( ! (entry->protection & VM_PROT_WRITE)) {
3509 vm_map_unlock(dst_map);
3510 return(KERN_PROTECTION_FAILURE);
3511 }
3512
3513 /*
3514 * If the entry is in transition, we must wait
3515 * for it to exit that state. Anything could happen
3516 * when we unlock the map, so start over.
3517 */
3518 if (entry->in_transition) {
3519
3520 /*
3521 * Say that we are waiting, and wait for entry.
3522 */
3523 entry->needs_wakeup = TRUE;
3524 vm_map_entry_wait(dst_map, THREAD_UNINT);
3525
3526 goto start_pass_1;
3527 }
3528
3529/*
3530 * our range is contained completely within this map entry
3531 */
3532 if (dst_end <= entry->vme_end) {
3533 vm_map_unlock(dst_map);
3534 return KERN_SUCCESS;
3535 }
3536/*
3537 * check that range specified is contiguous region
3538 */
3539 if ((next == vm_map_to_entry(dst_map)) ||
3540 (next->vme_start != entry->vme_end)) {
3541 vm_map_unlock(dst_map);
3542 return(KERN_INVALID_ADDRESS);
3543 }
3544
3545 /*
3546 * Check for permanent objects in the destination.
3547 */
3548 if ((entry->object.vm_object != VM_OBJECT_NULL) &&
3549 ((!entry->object.vm_object->internal) ||
3550 (entry->object.vm_object->true_share))) {
3551 if(encountered_sub_map) {
3552 vm_map_unlock(dst_map);
3553 return(KERN_FAILURE);
3554 }
3555 }
3556
3557
3558 entry = next;
3559 }/* for */
3560 vm_map_unlock(dst_map);
3561 return(KERN_SUCCESS);
3562}
3563
3564/*
3565 * Routine: vm_map_copy_overwrite
3566 *
3567 * Description:
3568 * Copy the memory described by the map copy
3569 * object (copy; returned by vm_map_copyin) onto
3570 * the specified destination region (dst_map, dst_addr).
3571 * The destination must be writeable.
3572 *
3573 * Unlike vm_map_copyout, this routine actually
3574 * writes over previously-mapped memory. If the
3575 * previous mapping was to a permanent (user-supplied)
3576 * memory object, it is preserved.
3577 *
3578 * The attributes (protection and inheritance) of the
3579 * destination region are preserved.
3580 *
3581 * If successful, consumes the copy object.
3582 * Otherwise, the caller is responsible for it.
3583 *
3584 * Implementation notes:
3585 * To overwrite aligned temporary virtual memory, it is
3586 * sufficient to remove the previous mapping and insert
3587 * the new copy. This replacement is done either on
3588 * the whole region (if no permanent virtual memory
3589 * objects are embedded in the destination region) or
3590 * in individual map entries.
3591 *
 3592 * To overwrite permanent virtual memory, it is necessary
3593 * to copy each page, as the external memory management
3594 * interface currently does not provide any optimizations.
3595 *
3596 * Unaligned memory also has to be copied. It is possible
3597 * to use 'vm_trickery' to copy the aligned data. This is
3598 * not done but not hard to implement.
3599 *
3600 * Once a page of permanent memory has been overwritten,
3601 * it is impossible to interrupt this function; otherwise,
3602 * the call would be neither atomic nor location-independent.
3603 * The kernel-state portion of a user thread must be
3604 * interruptible.
3605 *
3606 * It may be expensive to forward all requests that might
3607 * overwrite permanent memory (vm_write, vm_copy) to
3608 * uninterruptible kernel threads. This routine may be
3609 * called by interruptible threads; however, success is
3610 * not guaranteed -- if the request cannot be performed
3611 * atomically and interruptibly, an error indication is
3612 * returned.
3613 */
3614
3615kern_return_t
3616vm_map_copy_overwrite_nested(
3617 vm_map_t dst_map,
3618 vm_offset_t dst_addr,
3619 vm_map_copy_t copy,
3620 boolean_t interruptible,
3621 pmap_t pmap)
3622{
3623 vm_offset_t dst_end;
3624 vm_map_entry_t tmp_entry;
3625 vm_map_entry_t entry;
3626 kern_return_t kr;
3627 boolean_t aligned = TRUE;
3628 boolean_t contains_permanent_objects = FALSE;
3629 boolean_t encountered_sub_map = FALSE;
3630 vm_offset_t base_addr;
3631 vm_size_t copy_size;
3632 vm_size_t total_size;
3633
3634
3635 /*
3636 * Check for null copy object.
3637 */
3638
3639 if (copy == VM_MAP_COPY_NULL)
3640 return(KERN_SUCCESS);
3641
3642 /*
3643 * Check for special kernel buffer allocated
3644 * by new_ipc_kmsg_copyin.
3645 */
3646
3647 if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
3648 return(vm_map_copyout_kernel_buffer(
3649 dst_map, &dst_addr,
3650 copy, TRUE));
3651 }
3652
3653 /*
3654 * Only works for entry lists at the moment. Will
3655 * support page lists later.
3656 */
3657
3658 assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
3659
3660 if (copy->size == 0) {
3661 vm_map_copy_discard(copy);
3662 return(KERN_SUCCESS);
3663 }
3664
3665 /*
3666 * Verify that the destination is all writeable
3667 * initially. We have to trunc the destination
3668 * address and round the copy size or we'll end up
3669 * splitting entries in strange ways.
3670 */
3671
3672 if (!page_aligned(copy->size) ||
3673 !page_aligned (copy->offset) ||
3674 !page_aligned (dst_addr))
3675 {
3676 aligned = FALSE;
3677 dst_end = round_page(dst_addr + copy->size);
3678 } else {
3679 dst_end = dst_addr + copy->size;
3680 }
3681
3682start_pass_1:
3683 vm_map_lock(dst_map);
3684 if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
3685 vm_map_unlock(dst_map);
3686 return(KERN_INVALID_ADDRESS);
3687 }
3688 vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
3689 for (entry = tmp_entry;;) {
3690 vm_map_entry_t next = entry->vme_next;
3691
3692 while(entry->is_sub_map) {
3693 vm_offset_t sub_start;
3694 vm_offset_t sub_end;
3695 vm_offset_t local_end;
3696
3697 if (entry->in_transition) {
3698
3699 /*
3700 * Say that we are waiting, and wait for entry.
3701 */
3702 entry->needs_wakeup = TRUE;
3703 vm_map_entry_wait(dst_map, THREAD_UNINT);
3704
3705 goto start_pass_1;
3706 }
3707
3708 local_end = entry->vme_end;
3709 if (!(entry->needs_copy)) {
3710 /* if needs_copy we are a COW submap */
3711 /* in such a case we just replace so */
3712 /* there is no need for the follow- */
3713 /* ing check. */
3714 encountered_sub_map = TRUE;
3715 sub_start = entry->offset;
3716
3717 if(entry->vme_end < dst_end)
3718 sub_end = entry->vme_end;
3719 else
3720 sub_end = dst_end;
3721 sub_end -= entry->vme_start;
3722 sub_end += entry->offset;
3723 vm_map_unlock(dst_map);
3724
3725 kr = vm_map_overwrite_submap_recurse(
3726 entry->object.sub_map,
3727 sub_start,
3728 sub_end - sub_start);
3729 if(kr != KERN_SUCCESS)
3730 return kr;
3731 vm_map_lock(dst_map);
3732 }
3733
3734 if (dst_end <= entry->vme_end)
3735 goto start_overwrite;
3736 if(!vm_map_lookup_entry(dst_map, local_end,
3737 &entry)) {
3738 vm_map_unlock(dst_map);
3739 return(KERN_INVALID_ADDRESS);
3740 }
3741 next = entry->vme_next;
3742 }
3743
3744 if ( ! (entry->protection & VM_PROT_WRITE)) {
3745 vm_map_unlock(dst_map);
3746 return(KERN_PROTECTION_FAILURE);
3747 }
3748
3749 /*
3750 * If the entry is in transition, we must wait
3751 * for it to exit that state. Anything could happen
3752 * when we unlock the map, so start over.
3753 */
3754 if (entry->in_transition) {
3755
3756 /*
3757 * Say that we are waiting, and wait for entry.
3758 */
3759 entry->needs_wakeup = TRUE;
3760 vm_map_entry_wait(dst_map, THREAD_UNINT);
3761
3762 goto start_pass_1;
3763 }
3764
3765/*
3766 * our range is contained completely within this map entry
3767 */
3768 if (dst_end <= entry->vme_end)
3769 break;
3770/*
3771 * check that range specified is contiguous region
3772 */
3773 if ((next == vm_map_to_entry(dst_map)) ||
3774 (next->vme_start != entry->vme_end)) {
3775 vm_map_unlock(dst_map);
3776 return(KERN_INVALID_ADDRESS);
3777 }
3778
3779
3780 /*
3781 * Check for permanent objects in the destination.
3782 */
3783 if ((entry->object.vm_object != VM_OBJECT_NULL) &&
3784 ((!entry->object.vm_object->internal) ||
3785 (entry->object.vm_object->true_share))) {
3786 contains_permanent_objects = TRUE;
3787 }
3788
3789 entry = next;
3790 }/* for */
3791
3792start_overwrite:
3793 /*
3794 * If there are permanent objects in the destination, then
3795 * the copy cannot be interrupted.
3796 */
3797
3798 if (interruptible && contains_permanent_objects) {
3799 vm_map_unlock(dst_map);
3800 return(KERN_FAILURE); /* XXX */
3801 }
3802
3803 /*
3804 *
3805 * Make a second pass, overwriting the data
3806 * At the beginning of each loop iteration,
3807 * the next entry to be overwritten is "tmp_entry"
3808 * (initially, the value returned from the lookup above),
3809 * and the starting address expected in that entry
3810 * is "start".
3811 */
3812
3813 total_size = copy->size;
3814 if(encountered_sub_map) {
3815 copy_size = 0;
3816 /* re-calculate tmp_entry since we've had the map */
3817 /* unlocked */
3818 if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) {
3819 vm_map_unlock(dst_map);
3820 return(KERN_INVALID_ADDRESS);
3821 }
3822 } else {
3823 copy_size = copy->size;
3824 }
3825
3826 base_addr = dst_addr;
3827 while(TRUE) {
3828 /* deconstruct the copy object and do in parts */
 3829 /* only in sub_map, interruptible case */
3830 vm_map_entry_t copy_entry;
3831 vm_map_entry_t previous_prev;
 3832 vm_map_entry_t next_copy = NULL;
3833 int nentries;
3834 int remaining_entries;
3835 int new_offset;
3836
3837 for (entry = tmp_entry; copy_size == 0;) {
3838 vm_map_entry_t next;
3839
3840 next = entry->vme_next;
3841
3842 /* tmp_entry and base address are moved along */
3843 /* each time we encounter a sub-map. Otherwise */
 3844 /* entry can outpace tmp_entry, and the copy_size */
 3845 /* may reflect the distance between them. */
 3846 /* If the current entry is found to be in transition, */
 3847 /* we will start over at the beginning or at the last */
 3848 /* encounter of a submap, as dictated by base_addr, */
 3849 /* and we will zero copy_size accordingly. */
3850 if (entry->in_transition) {
3851 /*
3852 * Say that we are waiting, and wait for entry.
3853 */
3854 entry->needs_wakeup = TRUE;
3855 vm_map_entry_wait(dst_map, THREAD_UNINT);
3856
3857 vm_map_lock(dst_map);
3858 if(!vm_map_lookup_entry(dst_map, base_addr,
3859 &tmp_entry)) {
3860 vm_map_unlock(dst_map);
3861 return(KERN_INVALID_ADDRESS);
3862 }
3863 copy_size = 0;
3864 entry = tmp_entry;
3865 continue;
3866 }
3867 if(entry->is_sub_map) {
3868 vm_offset_t sub_start;
3869 vm_offset_t sub_end;
3870 vm_offset_t local_end;
3871
3872 if (entry->needs_copy) {
3873 /* if this is a COW submap */
 3874 /* just back the range with an */
 3875 /* anonymous entry */
3876 if(entry->vme_end < dst_end)
3877 sub_end = entry->vme_end;
3878 else
3879 sub_end = dst_end;
3880 if(entry->vme_start < base_addr)
3881 sub_start = base_addr;
3882 else
3883 sub_start = entry->vme_start;
3884 vm_map_clip_end(
3885 dst_map, entry, sub_end);
3886 vm_map_clip_start(
3887 dst_map, entry, sub_start);
3888 entry->is_sub_map = FALSE;
3889 vm_map_deallocate(
3890 entry->object.sub_map);
3891 entry->object.sub_map = NULL;
3892 entry->is_shared = FALSE;
3893 entry->needs_copy = FALSE;
3894 entry->offset = 0;
3895 entry->protection = VM_PROT_ALL;
3896 entry->max_protection = VM_PROT_ALL;
3897 entry->wired_count = 0;
3898 entry->user_wired_count = 0;
3899 if(entry->inheritance
3900 == VM_INHERIT_SHARE)
3901 entry->inheritance = VM_INHERIT_COPY;
3902 continue;
3903 }
3904 /* first take care of any non-sub_map */
3905 /* entries to send */
3906 if(base_addr < entry->vme_start) {
3907 /* stuff to send */
3908 copy_size =
3909 entry->vme_start - base_addr;
3910 break;
3911 }
3912 sub_start = entry->offset;
3913
3914 if(entry->vme_end < dst_end)
3915 sub_end = entry->vme_end;
3916 else
3917 sub_end = dst_end;
3918 sub_end -= entry->vme_start;
3919 sub_end += entry->offset;
3920 local_end = entry->vme_end;
3921 vm_map_unlock(dst_map);
3922 copy_size = sub_end - sub_start;
3923
3924 /* adjust the copy object */
3925 if (total_size > copy_size) {
3926 vm_size_t local_size = 0;
3927 vm_size_t entry_size;
3928
3929 nentries = 1;
3930 new_offset = copy->offset;
3931 copy_entry = vm_map_copy_first_entry(copy);
3932 while(copy_entry !=
3933 vm_map_copy_to_entry(copy)){
3934 entry_size = copy_entry->vme_end -
3935 copy_entry->vme_start;
3936 if((local_size < copy_size) &&
3937 ((local_size + entry_size)
3938 >= copy_size)) {
3939 vm_map_copy_clip_end(copy,
3940 copy_entry,
3941 copy_entry->vme_start +
3942 (copy_size - local_size));
3943 entry_size = copy_entry->vme_end -
3944 copy_entry->vme_start;
3945 local_size += entry_size;
3946 new_offset += entry_size;
3947 }
3948 if(local_size >= copy_size) {
3949 next_copy = copy_entry->vme_next;
3950 copy_entry->vme_next =
3951 vm_map_copy_to_entry(copy);
3952 previous_prev =
3953 copy->cpy_hdr.links.prev;
3954 copy->cpy_hdr.links.prev = copy_entry;
3955 copy->size = copy_size;
3956 remaining_entries =
3957 copy->cpy_hdr.nentries;
3958 remaining_entries -= nentries;
3959 copy->cpy_hdr.nentries = nentries;
3960 break;
3961 } else {
3962 local_size += entry_size;
3963 new_offset += entry_size;
3964 nentries++;
3965 }
3966 copy_entry = copy_entry->vme_next;
3967 }
3968 }
3969
3970 if((entry->use_pmap) && (pmap == NULL)) {
3971 kr = vm_map_copy_overwrite_nested(
3972 entry->object.sub_map,
3973 sub_start,
3974 copy,
3975 interruptible,
3976 entry->object.sub_map->pmap);
3977 } else if (pmap != NULL) {
3978 kr = vm_map_copy_overwrite_nested(
3979 entry->object.sub_map,
3980 sub_start,
3981 copy,
3982 interruptible, pmap);
3983 } else {
3984 kr = vm_map_copy_overwrite_nested(
3985 entry->object.sub_map,
3986 sub_start,
3987 copy,
3988 interruptible,
3989 dst_map->pmap);
3990 }
3991 if(kr != KERN_SUCCESS) {
3992 if(next_copy != NULL) {
3993 copy->cpy_hdr.nentries +=
3994 remaining_entries;
3995 copy->cpy_hdr.links.prev->vme_next =
3996 next_copy;
3997 copy->cpy_hdr.links.prev
3998 = previous_prev;
3999 copy->size = total_size;
4000 }
4001 return kr;
4002 }
4003 if (dst_end <= local_end) {
4004 return(KERN_SUCCESS);
4005 }
4006 /* otherwise copy no longer exists, it was */
4007 /* destroyed after successful copy_overwrite */
4008 copy = (vm_map_copy_t)
4009 zalloc(vm_map_copy_zone);
4010 vm_map_copy_first_entry(copy) =
4011 vm_map_copy_last_entry(copy) =
4012 vm_map_copy_to_entry(copy);
4013 copy->type = VM_MAP_COPY_ENTRY_LIST;
4014 copy->offset = new_offset;
4015
4016 total_size -= copy_size;
4017 copy_size = 0;
4018 /* put back remainder of copy in container */
4019 if(next_copy != NULL) {
4020 copy->cpy_hdr.nentries = remaining_entries;
4021 copy->cpy_hdr.links.next = next_copy;
4022 copy->cpy_hdr.links.prev = previous_prev;
4023 copy->size = total_size;
4024 next_copy->vme_prev =
4025 vm_map_copy_to_entry(copy);
4026 next_copy = NULL;
4027 }
4028 base_addr = local_end;
4029 vm_map_lock(dst_map);
4030 if(!vm_map_lookup_entry(dst_map,
4031 local_end, &tmp_entry)) {
4032 vm_map_unlock(dst_map);
4033 return(KERN_INVALID_ADDRESS);
4034 }
4035 entry = tmp_entry;
4036 continue;
4037 }
4038 if (dst_end <= entry->vme_end) {
4039 copy_size = dst_end - base_addr;
4040 break;
4041 }
4042
4043 if ((next == vm_map_to_entry(dst_map)) ||
4044 (next->vme_start != entry->vme_end)) {
4045 vm_map_unlock(dst_map);
4046 return(KERN_INVALID_ADDRESS);
4047 }
4048
4049 entry = next;
4050 }/* for */
4051
4052 next_copy = NULL;
4053 nentries = 1;
4054
4055 /* adjust the copy object */
4056 if (total_size > copy_size) {
4057 vm_size_t local_size = 0;
4058 vm_size_t entry_size;
4059
4060 new_offset = copy->offset;
4061 copy_entry = vm_map_copy_first_entry(copy);
4062 while(copy_entry != vm_map_copy_to_entry(copy)) {
4063 entry_size = copy_entry->vme_end -
4064 copy_entry->vme_start;
4065 if((local_size < copy_size) &&
4066 ((local_size + entry_size)
4067 >= copy_size)) {
4068 vm_map_copy_clip_end(copy, copy_entry,
4069 copy_entry->vme_start +
4070 (copy_size - local_size));
4071 entry_size = copy_entry->vme_end -
4072 copy_entry->vme_start;
4073 local_size += entry_size;
4074 new_offset += entry_size;
4075 }
4076 if(local_size >= copy_size) {
4077 next_copy = copy_entry->vme_next;
4078 copy_entry->vme_next =
4079 vm_map_copy_to_entry(copy);
4080 previous_prev =
4081 copy->cpy_hdr.links.prev;
4082 copy->cpy_hdr.links.prev = copy_entry;
4083 copy->size = copy_size;
4084 remaining_entries =
4085 copy->cpy_hdr.nentries;
4086 remaining_entries -= nentries;
4087 copy->cpy_hdr.nentries = nentries;
4088 break;
4089 } else {
4090 local_size += entry_size;
4091 new_offset += entry_size;
4092 nentries++;
4093 }
4094 copy_entry = copy_entry->vme_next;
4095 }
4096 }
4097
4098 if (aligned) {
4099 pmap_t local_pmap;
4100
4101 if(pmap)
4102 local_pmap = pmap;
4103 else
4104 local_pmap = dst_map->pmap;
4105
4106 if ((kr = vm_map_copy_overwrite_aligned(
4107 dst_map, tmp_entry, copy,
4108 base_addr, local_pmap)) != KERN_SUCCESS) {
4109 if(next_copy != NULL) {
4110 copy->cpy_hdr.nentries +=
4111 remaining_entries;
4112 copy->cpy_hdr.links.prev->vme_next =
4113 next_copy;
4114 copy->cpy_hdr.links.prev =
4115 previous_prev;
4116 copy->size += copy_size;
4117 }
4118 return kr;
4119 }
4120 vm_map_unlock(dst_map);
4121 } else {
4122 /*
4123 * Performance gain:
4124 *
4125		 * if the copy and dst address are misaligned but have the same
4126		 * offset within the page, we can copy the misaligned parts
4127		 * unaligned and copy the rest aligned. If they are aligned but
4128		 * len is unaligned, we simply need to copy the trailing fragment
4129		 * unaligned. We'll need to split the misaligned bits of the
4130		 * region in this case !
4131 */
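		/*
		 * Illustrative example (assuming 4K pages): copying 0x2300
		 * bytes from a copy whose offset is 0x300 within its page to
		 * dst address 0xA300: both share page offset 0x300, so the
		 * 0xD00-byte head and the 0x600-byte tail are handled by the
		 * unaligned path, while the whole page in between could go
		 * through the aligned path.
		 */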
4132 /* ALWAYS UNLOCKS THE dst_map MAP */
4133 if ((kr = vm_map_copy_overwrite_unaligned( dst_map,
4134 tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
4135 if(next_copy != NULL) {
4136 copy->cpy_hdr.nentries +=
4137 remaining_entries;
4138 copy->cpy_hdr.links.prev->vme_next =
4139 next_copy;
4140 copy->cpy_hdr.links.prev =
4141 previous_prev;
4142 copy->size += copy_size;
4143 }
4144 return kr;
4145 }
4146 }
4147 total_size -= copy_size;
4148 if(total_size == 0)
4149 break;
4150 base_addr += copy_size;
4151 copy_size = 0;
4152 copy->offset = new_offset;
4153 if(next_copy != NULL) {
4154 copy->cpy_hdr.nentries = remaining_entries;
4155 copy->cpy_hdr.links.next = next_copy;
4156 copy->cpy_hdr.links.prev = previous_prev;
4157 next_copy->vme_prev = vm_map_copy_to_entry(copy);
4158 copy->size = total_size;
4159 }
4160 vm_map_lock(dst_map);
4161 while(TRUE) {
4162 if (!vm_map_lookup_entry(dst_map,
4163 base_addr, &tmp_entry)) {
4164 vm_map_unlock(dst_map);
4165 return(KERN_INVALID_ADDRESS);
4166 }
4167 if (tmp_entry->in_transition) {
4168 entry->needs_wakeup = TRUE;
4169 vm_map_entry_wait(dst_map, THREAD_UNINT);
4170 } else {
4171 break;
4172 }
4173 }
4174 vm_map_clip_start(dst_map, tmp_entry, trunc_page(base_addr));
4175
4176 entry = tmp_entry;
4177 } /* while */
4178
4179 /*
4180 * Throw away the vm_map_copy object
4181 */
4182 vm_map_copy_discard(copy);
4183
4184 return(KERN_SUCCESS);
4185}/* vm_map_copy_overwrite */
4186
4187kern_return_t
4188vm_map_copy_overwrite(
4189 vm_map_t dst_map,
4190 vm_offset_t dst_addr,
4191 vm_map_copy_t copy,
4192 boolean_t interruptible)
4193{
4194 return vm_map_copy_overwrite_nested(
4195 dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
4196}
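/*
 * Illustrative sketch only (kept under #if 0, never compiled): a minimal
 * caller that overwrites one range of "map" with the contents of another,
 * assuming the vm_map_copyin() convenience wrapper from vm_map.h.  On
 * success the copy object is consumed by vm_map_copy_overwrite(); on
 * failure the caller still owns it and must discard it.
 */
#if 0
static kern_return_t
vm_map_overwrite_example(
	vm_map_t	map,
	vm_offset_t	src_addr,
	vm_offset_t	dst_addr,
	vm_size_t	len)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	/* snapshot the source range, without destroying it */
	kr = vm_map_copyin(map, src_addr, len, FALSE, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	/* overwrite the destination range in place (non-interruptible) */
	kr = vm_map_copy_overwrite(map, dst_addr, copy, FALSE);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);
	return kr;
}
#endif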
4197
4198
4199/*
4200 * Routine: vm_map_copy_overwrite_unaligned
4201 *
4202 *	Description:
4203 * Physically copy unaligned data
4204 *
4205 * Implementation:
4206 * Unaligned parts of pages have to be physically copied. We use
4207 *		a modified form of vm_fault_copy (which understands non-aligned
4208 *		page offsets and sizes) to do the copy.  We attempt to copy as
4209 *		much memory in one pass as possible; however, vm_fault_copy
4210 *		copies within a single memory object, so we have to take the
4211 *		smallest of "amount left", "source object data size", and
4212 *		"target object data size".  With unaligned data we don't need
4213 *		to split regions, so the source (copy) object should be one
4214 *		map entry; the target range may, however, be split over
4215 *		multiple map entries.  In any event we are pessimistic
4216 *
4217 * Assumptions:
4218 * dst_map is locked on entry and is return locked on success,
4219 * unlocked on error.
4220 */
4221
4222kern_return_t
4223vm_map_copy_overwrite_unaligned(
4224 vm_map_t dst_map,
4225 vm_map_entry_t entry,
4226 vm_map_copy_t copy,
4227 vm_offset_t start)
4228{
4229 vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
4230 vm_map_version_t version;
4231 vm_object_t dst_object;
4232 vm_object_offset_t dst_offset;
4233 vm_object_offset_t src_offset;
4234 vm_object_offset_t entry_offset;
4235 vm_offset_t entry_end;
4236 vm_size_t src_size,
4237 dst_size,
4238 copy_size,
4239 amount_left;
4240 kern_return_t kr = KERN_SUCCESS;
4241
4242 vm_map_lock_write_to_read(dst_map);
4243
4244 src_offset = copy->offset - trunc_page_64(copy->offset);
4245 amount_left = copy->size;
4246/*
4247 *	unaligned, so we never clipped this entry; we need the offset into
4248 *	the vm_object, not just the data.
4249 */
4250 while (amount_left > 0) {
4251
4252 if (entry == vm_map_to_entry(dst_map)) {
4253 vm_map_unlock_read(dst_map);
4254 return KERN_INVALID_ADDRESS;
4255 }
4256
4257 /* "start" must be within the current map entry */
4258 assert ((start>=entry->vme_start) && (start<entry->vme_end));
4259
4260 dst_offset = start - entry->vme_start;
4261
4262 dst_size = entry->vme_end - start;
4263
4264 src_size = copy_entry->vme_end -
4265 (copy_entry->vme_start + src_offset);
4266
4267 if (dst_size < src_size) {
4268/*
4269 * we can only copy dst_size bytes before
4270 * we have to get the next destination entry
4271 */
4272 copy_size = dst_size;
4273 } else {
4274/*
4275 * we can only copy src_size bytes before
4276 * we have to get the next source copy entry
4277 */
4278 copy_size = src_size;
4279 }
4280
4281 if (copy_size > amount_left) {
4282 copy_size = amount_left;
4283 }
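/*
 *	e.g. if the destination entry has 0x3000 bytes left, the current
 *	copy entry only 0x1800, and amount_left is 0x5000, this pass copies
 *	0x1800 bytes and the next iteration advances to the next source
 *	copy entry.
 */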
4284/*
4285 *	Entry needs copy: create a shadow object for the
4286 *	copy-on-write region.
4287 */
4288 if (entry->needs_copy &&
4289 ((entry->protection & VM_PROT_WRITE) != 0))
4290 {
4291 if (vm_map_lock_read_to_write(dst_map)) {
4292 vm_map_lock_read(dst_map);
4293 goto RetryLookup;
4294 }
4295 vm_object_shadow(&entry->object.vm_object,
4296 &entry->offset,
4297 (vm_size_t)(entry->vme_end
4298 - entry->vme_start));
4299 entry->needs_copy = FALSE;
4300 vm_map_lock_write_to_read(dst_map);
4301 }
4302 dst_object = entry->object.vm_object;
4303/*
4304 *	unlike with the virtual (aligned) copy, we're going
4305 *	to fault on it; therefore we need a target object.
4306 */
4307 if (dst_object == VM_OBJECT_NULL) {
4308 if (vm_map_lock_read_to_write(dst_map)) {
4309 vm_map_lock_read(dst_map);
4310 goto RetryLookup;
4311 }
4312 dst_object = vm_object_allocate((vm_size_t)
4313 entry->vme_end - entry->vme_start);
4314 entry->object.vm_object = dst_object;
4315 entry->offset = 0;
4316 vm_map_lock_write_to_read(dst_map);
4317 }
4318/*
4319 * Take an object reference and unlock map. The "entry" may
4320 * disappear or change when the map is unlocked.
4321 */
4322 vm_object_reference(dst_object);
4323 version.main_timestamp = dst_map->timestamp;
4324 entry_offset = entry->offset;
4325 entry_end = entry->vme_end;
4326 vm_map_unlock_read(dst_map);
4327/*
4328 * Copy as much as possible in one pass
4329 */
4330 kr = vm_fault_copy(
4331 copy_entry->object.vm_object,
4332 copy_entry->offset + src_offset,
4333 &copy_size,
4334 dst_object,
4335 entry_offset + dst_offset,
4336 dst_map,
4337 &version,
4338 THREAD_UNINT );
4339
4340 start += copy_size;
4341 src_offset += copy_size;
4342 amount_left -= copy_size;
4343/*
4344 * Release the object reference
4345 */
4346 vm_object_deallocate(dst_object);
4347/*
4348 * If a hard error occurred, return it now
4349 */
4350 if (kr != KERN_SUCCESS)
4351 return kr;
4352
4353 if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
4354 || amount_left == 0)
4355 {
4356/*
4357 * all done with this copy entry, dispose.
4358 */
4359 vm_map_copy_entry_unlink(copy, copy_entry);
4360 vm_object_deallocate(copy_entry->object.vm_object);
4361 vm_map_copy_entry_dispose(copy, copy_entry);
4362
4363 if ((copy_entry = vm_map_copy_first_entry(copy))
4364 == vm_map_copy_to_entry(copy) && amount_left) {
4365/*
4366 *			not finished copying but ran out of source
4367 */
4368 return KERN_INVALID_ADDRESS;
4369 }
4370 src_offset = 0;
4371 }
4372
4373 if (amount_left == 0)
4374 return KERN_SUCCESS;
4375
4376 vm_map_lock_read(dst_map);
4377 if (version.main_timestamp == dst_map->timestamp) {
4378 if (start == entry_end) {
4379/*
4380 * destination region is split. Use the version
4381 * information to avoid a lookup in the normal
4382 * case.
4383 */
4384 entry = entry->vme_next;
4385/*
4386 * should be contiguous. Fail if we encounter
4387 * a hole in the destination.
4388 */
4389 if (start != entry->vme_start) {
4390 vm_map_unlock_read(dst_map);
4391 return KERN_INVALID_ADDRESS ;
4392 }
4393 }
4394 } else {
4395/*
4396 * Map version check failed.
4397 * we must lookup the entry because somebody
4398 * might have changed the map behind our backs.
4399 */
4400RetryLookup:
4401 if (!vm_map_lookup_entry(dst_map, start, &entry))
4402 {
4403 vm_map_unlock_read(dst_map);
4404 return KERN_INVALID_ADDRESS ;
4405 }
4406 }
4407 }/* while */
4408
4409 /* NOTREACHED ?? */
4410 vm_map_unlock_read(dst_map);
4411
4412 return KERN_SUCCESS;
4413}/* vm_map_copy_overwrite_unaligned */
4414
4415/*
4416 * Routine: vm_map_copy_overwrite_aligned
4417 *
4418 * Description:
4419 * Does all the vm_trickery possible for whole pages.
4420 *
4421 * Implementation:
4422 *
4423 * If there are no permanent objects in the destination,
4424 * and the source and destination map entry zones match,
4425 * and the destination map entry is not shared,
4426 * then the map entries can be deleted and replaced
4427 * with those from the copy. The following code is the
4428 * basic idea of what to do, but there are lots of annoying
4429 * little details about getting protection and inheritance
4430 * right. Should add protection, inheritance, and sharing checks
4431 * to the above pass and make sure that no wiring is involved.
4432 */
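/*
 *	In outline, each iteration below either (a) installs the copy
 *	entry's object directly in the destination entry when the
 *	destination holds temporary, unshared memory (the entry-replacement
 *	path), or (b) falls back to vm_fault_copy() to physically copy the
 *	data into a permanent or shared destination object.
 */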
4433
4434kern_return_t
4435vm_map_copy_overwrite_aligned(
4436 vm_map_t dst_map,
4437 vm_map_entry_t tmp_entry,
4438 vm_map_copy_t copy,
4439 vm_offset_t start,
4440 pmap_t pmap)
4441{
4442 vm_object_t object;
4443 vm_map_entry_t copy_entry;
4444 vm_size_t copy_size;
4445 vm_size_t size;
4446 vm_map_entry_t entry;
4447
4448 while ((copy_entry = vm_map_copy_first_entry(copy))
4449 != vm_map_copy_to_entry(copy))
4450 {
4451 copy_size = (copy_entry->vme_end - copy_entry->vme_start);
4452
4453 entry = tmp_entry;
4454 if (entry == vm_map_to_entry(dst_map)) {
4455 vm_map_unlock(dst_map);
4456 return KERN_INVALID_ADDRESS;
4457 }
4458 size = (entry->vme_end - entry->vme_start);
4459 /*
4460 * Make sure that no holes popped up in the
4461 * address map, and that the protection is
4462 * still valid, in case the map was unlocked
4463 * earlier.
4464 */
4465
4466 if ((entry->vme_start != start) || ((entry->is_sub_map)
4467 && !entry->needs_copy)) {
4468 vm_map_unlock(dst_map);
4469 return(KERN_INVALID_ADDRESS);
4470 }
4471 assert(entry != vm_map_to_entry(dst_map));
4472
4473 /*
4474 * Check protection again
4475 */
4476
4477 if ( ! (entry->protection & VM_PROT_WRITE)) {
4478 vm_map_unlock(dst_map);
4479 return(KERN_PROTECTION_FAILURE);
4480 }
4481
4482 /*
4483 * Adjust to source size first
4484 */
4485
4486 if (copy_size < size) {
4487 vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
4488 size = copy_size;
4489 }
4490
4491 /*
4492 * Adjust to destination size
4493 */
4494
4495 if (size < copy_size) {
4496 vm_map_copy_clip_end(copy, copy_entry,
4497 copy_entry->vme_start + size);
4498 copy_size = size;
4499 }
4500
4501 assert((entry->vme_end - entry->vme_start) == size);
4502 assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
4503 assert((copy_entry->vme_end - copy_entry->vme_start) == size);
4504
4505 /*
4506 * If the destination contains temporary unshared memory,
4507 * we can perform the copy by throwing it away and
4508 * installing the source data.
4509 */
4510
4511 object = entry->object.vm_object;
4512 if ((!entry->is_shared &&
4513 ((object == VM_OBJECT_NULL) ||
4514 (object->internal && !object->true_share))) ||
4515 entry->needs_copy) {
4516 vm_object_t old_object = entry->object.vm_object;
4517 vm_object_offset_t old_offset = entry->offset;
4518 vm_object_offset_t offset;
4519
4520 /*
4521 * Ensure that the source and destination aren't
4522 * identical
4523 */
4524 if (old_object == copy_entry->object.vm_object &&
4525 old_offset == copy_entry->offset) {
4526 vm_map_copy_entry_unlink(copy, copy_entry);
4527 vm_map_copy_entry_dispose(copy, copy_entry);
4528
4529 if (old_object != VM_OBJECT_NULL)
4530 vm_object_deallocate(old_object);
4531
4532 start = tmp_entry->vme_end;
4533 tmp_entry = tmp_entry->vme_next;
4534 continue;
4535 }
4536
4537 if (old_object != VM_OBJECT_NULL) {
4538 if(entry->is_sub_map) {
4539 if(entry->use_pmap) {
4540#ifndef i386
4541 pmap_unnest(dst_map->pmap,
4542 entry->vme_start,
4543 entry->vme_end - entry->vme_start);
4544#endif
4545 } else {
4546 vm_map_submap_pmap_clean(
4547 dst_map, entry->vme_start,
4548 entry->vme_end,
4549 entry->object.sub_map,
4550 entry->offset);
4551 }
4552 vm_map_deallocate(
4553 entry->object.sub_map);
4554 } else {
4555 vm_object_pmap_protect(
4556 old_object,
4557 old_offset,
4558 size,
4559 pmap,
4560 tmp_entry->vme_start,
4561 VM_PROT_NONE);
4562
4563 vm_object_deallocate(old_object);
4564 }
4565 }
4566
4567 entry->is_sub_map = FALSE;
4568 entry->object = copy_entry->object;
4569 object = entry->object.vm_object;
4570 entry->needs_copy = copy_entry->needs_copy;
4571 entry->wired_count = 0;
4572 entry->user_wired_count = 0;
4573 offset = entry->offset = copy_entry->offset;
4574
4575 vm_map_copy_entry_unlink(copy, copy_entry);
4576 vm_map_copy_entry_dispose(copy, copy_entry);
4577#if BAD_OPTIMIZATION
4578 /*
4579 * if we turn this optimization back on
4580		 * we need to revisit our use of pmap mappings;
4581		 * large copies will cause us to run out and panic.
4582		 * this optimization only saved on average 2 us per page if ALL
4583		 * the pages in the source were currently mapped
4584		 * and ALL the pages in the dest were touched; if fewer than 2/3
4585		 * of the pages were touched, this optimization actually cost more cycles
4586 */
4587
4588 /*
4589 * Try to aggressively enter physical mappings
4590 * (but avoid uninstantiated objects)
4591 */
4592 if (object != VM_OBJECT_NULL) {
4593 vm_offset_t va = entry->vme_start;
4594
4595 while (va < entry->vme_end) {
4596 register vm_page_t m;
4597 vm_prot_t prot;
4598
4599 /*
4600 * Look for the page in the top object
4601 */
4602 prot = entry->protection;
4603 vm_object_lock(object);
4604 vm_object_paging_begin(object);
4605
4606 if ((m = vm_page_lookup(object,offset)) !=
4607 VM_PAGE_NULL && !m->busy &&
4608 !m->fictitious &&
4609 (!m->unusual || (!m->error &&
4610 !m->restart && !m->absent &&
4611 (prot & m->page_lock) == 0))) {
4612
4613 m->busy = TRUE;
4614 vm_object_unlock(object);
4615
4616 /*
4617 * Honor COW obligations
4618 */
4619 if (entry->needs_copy)
4620 prot &= ~VM_PROT_WRITE;
4621 /* It is our policy to require */
4622 /* explicit sync from anyone */
4623 /* writing code and then */
4624 /* a pc to execute it. */
4625 /* No isync here */
4626
4627 PMAP_ENTER(pmap, va, m,
4628 prot, FALSE);
4629
4630 vm_object_lock(object);
4631 vm_page_lock_queues();
4632 if (!m->active && !m->inactive)
4633 vm_page_activate(m);
4634 vm_page_unlock_queues();
4635 PAGE_WAKEUP_DONE(m);
4636 }
4637 vm_object_paging_end(object);
4638 vm_object_unlock(object);
4639
4640 offset += PAGE_SIZE_64;
4641 va += PAGE_SIZE;
4642 } /* end while (va < entry->vme_end) */
4643 } /* end if (object) */
4644#endif
4645 /*
4646 * Set up for the next iteration. The map
4647 * has not been unlocked, so the next
4648 * address should be at the end of this
4649 * entry, and the next map entry should be
4650 * the one following it.
4651 */
4652
4653 start = tmp_entry->vme_end;
4654 tmp_entry = tmp_entry->vme_next;
4655 } else {
4656 vm_map_version_t version;
4657 vm_object_t dst_object = entry->object.vm_object;
4658 vm_object_offset_t dst_offset = entry->offset;
4659 kern_return_t r;
4660
4661 /*
4662 * Take an object reference, and record
4663 * the map version information so that the
4664 * map can be safely unlocked.
4665 */
4666
4667 vm_object_reference(dst_object);
4668
4669 version.main_timestamp = dst_map->timestamp;
4670
4671 vm_map_unlock(dst_map);
4672
4673 /*
4674 * Copy as much as possible in one pass
4675 */
4676
4677 copy_size = size;
4678 r = vm_fault_copy(
4679 copy_entry->object.vm_object,
4680 copy_entry->offset,
4681 &copy_size,
4682 dst_object,
4683 dst_offset,
4684 dst_map,
4685 &version,
4686 THREAD_UNINT );
4687
4688 /*
4689 * Release the object reference
4690 */
4691
4692 vm_object_deallocate(dst_object);
4693
4694 /*
4695 * If a hard error occurred, return it now
4696 */
4697
4698 if (r != KERN_SUCCESS)
4699 return(r);
4700
4701 if (copy_size != 0) {
4702 /*
4703 * Dispose of the copied region
4704 */
4705
4706 vm_map_copy_clip_end(copy, copy_entry,
4707 copy_entry->vme_start + copy_size);
4708 vm_map_copy_entry_unlink(copy, copy_entry);
4709 vm_object_deallocate(copy_entry->object.vm_object);
4710 vm_map_copy_entry_dispose(copy, copy_entry);
4711 }
4712
4713 /*
4714 * Pick up in the destination map where we left off.
4715 *
4716 * Use the version information to avoid a lookup
4717 * in the normal case.
4718 */
4719
4720 start += copy_size;
4721 vm_map_lock(dst_map);
4722 if ((version.main_timestamp + 1) == dst_map->timestamp) {
4723 /* We can safely use saved tmp_entry value */
4724
4725 vm_map_clip_end(dst_map, tmp_entry, start);
4726 tmp_entry = tmp_entry->vme_next;
4727 } else {
4728 /* Must do lookup of tmp_entry */
4729
4730 if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
4731 vm_map_unlock(dst_map);
4732 return(KERN_INVALID_ADDRESS);
4733 }
4734 vm_map_clip_start(dst_map, tmp_entry, start);
4735 }
4736 }
4737 }/* while */
4738
4739 return(KERN_SUCCESS);
4740}/* vm_map_copy_overwrite_aligned */
4741
4742/*
4743 * Routine: vm_map_copyin_kernel_buffer
4744 *
4745 * Description:
4746 * Copy in data to a kernel buffer from space in the
4747 *		source map.  The original space may be optionally
4748 * deallocated.
4749 *
4750 * If successful, returns a new copy object.
4751 */
4752kern_return_t
4753vm_map_copyin_kernel_buffer(
4754 vm_map_t src_map,
4755 vm_offset_t src_addr,
4756 vm_size_t len,
4757 boolean_t src_destroy,
4758 vm_map_copy_t *copy_result)
4759{
4760 boolean_t flags;
4761 vm_map_copy_t copy;
4762 vm_size_t kalloc_size = sizeof(struct vm_map_copy) + len;
4763
4764 copy = (vm_map_copy_t) kalloc(kalloc_size);
4765 if (copy == VM_MAP_COPY_NULL) {
4766 return KERN_RESOURCE_SHORTAGE;
4767 }
4768 copy->type = VM_MAP_COPY_KERNEL_BUFFER;
4769 copy->size = len;
4770 copy->offset = 0;
4771 copy->cpy_kdata = (vm_offset_t) (copy + 1);
4772 copy->cpy_kalloc_size = kalloc_size;
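	/*
	 * The copy header and the copied bytes share a single kalloc'd
	 * block: cpy_kdata points just past the vm_map_copy structure
	 * itself, and cpy_kalloc_size covers both for the eventual kfree.
	 */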
4773
4774 if (src_map == kernel_map) {
4775 bcopy((char *)src_addr, (char *)copy->cpy_kdata, len);
4776 flags = VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_WAIT_FOR_KWIRE |
4777 VM_MAP_REMOVE_INTERRUPTIBLE;
4778 } else {
4779 kern_return_t kr;
4780 kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len);
4781 if (kr != KERN_SUCCESS) {
4782 kfree((vm_offset_t)copy, kalloc_size);
4783 return kr;
4784 }
4785 flags = VM_MAP_REMOVE_WAIT_FOR_KWIRE |
4786 VM_MAP_REMOVE_INTERRUPTIBLE;
4787 }
4788 if (src_destroy) {
4789 (void) vm_map_remove(src_map, trunc_page(src_addr),
4790 round_page(src_addr + len),
4791 flags);
4792 }
4793 *copy_result = copy;
4794 return KERN_SUCCESS;
4795}
4796
4797/*
4798 * Routine: vm_map_copyout_kernel_buffer
4799 *
4800 * Description:
4801 * Copy out data from a kernel buffer into space in the
4802 *		destination map.  The space may be optionally dynamically
4803 * allocated.
4804 *
4805 * If successful, consumes the copy object.
4806 * Otherwise, the caller is responsible for it.
4807 */
4808kern_return_t
4809vm_map_copyout_kernel_buffer(
4810 vm_map_t map,
4811 vm_offset_t *addr, /* IN/OUT */
4812 vm_map_copy_t copy,
4813 boolean_t overwrite)
4814{
4815 kern_return_t kr = KERN_SUCCESS;
4816 thread_act_t thr_act = current_act();
4817
4818 if (!overwrite) {
4819
4820 /*
4821 * Allocate space in the target map for the data
4822 */
4823 *addr = 0;
4824 kr = vm_map_enter(map,
4825 addr,
4826 round_page(copy->size),
4827 (vm_offset_t) 0,
4828 TRUE,
4829 VM_OBJECT_NULL,
4830 (vm_object_offset_t) 0,
4831 FALSE,
4832 VM_PROT_DEFAULT,
4833 VM_PROT_ALL,
4834 VM_INHERIT_DEFAULT);
4835 if (kr != KERN_SUCCESS)
4836 return(kr);
4837 }
4838
4839 /*
4840 * Copyout the data from the kernel buffer to the target map.
4841 */
4842 if (thr_act->map == map) {
4843
4844 /*
4845 * If the target map is the current map, just do
4846 * the copy.
4847 */
4848 if (copyout((char *)copy->cpy_kdata, (char *)*addr,
4849 copy->size)) {
4850			return(KERN_INVALID_ADDRESS);
4851 }
4852 }
4853 else {
4854 vm_map_t oldmap;
4855
4856 /*
4857 * If the target map is another map, assume the
4858 * target's address space identity for the duration
4859 * of the copy.
4860 */
4861 vm_map_reference(map);
4862 oldmap = vm_map_switch(map);
4863
4864 if (copyout((char *)copy->cpy_kdata, (char *)*addr,
4865 copy->size)) {
4866			return(KERN_INVALID_ADDRESS);
4867 }
4868
4869 (void) vm_map_switch(oldmap);
4870 vm_map_deallocate(map);
4871 }
4872
4873 kfree((vm_offset_t)copy, copy->cpy_kalloc_size);
4874
4875 return(kr);
4876}
4877
4878/*
4879 * Macro: vm_map_copy_insert
4880 *
4881 * Description:
4882 * Link a copy chain ("copy") into a map at the
4883 * specified location (after "where").
4884 * Side effects:
4885 * The copy chain is destroyed.
4886 * Warning:
4887 * The arguments are evaluated multiple times.
4888 */
4889#define vm_map_copy_insert(map, where, copy) \
4890MACRO_BEGIN \
4891 vm_map_t VMCI_map; \
4892 vm_map_entry_t VMCI_where; \
4893 vm_map_copy_t VMCI_copy; \
4894 VMCI_map = (map); \
4895 VMCI_where = (where); \
4896 VMCI_copy = (copy); \
4897 ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
4898 ->vme_next = (VMCI_where->vme_next); \
4899 ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \
4900 ->vme_prev = VMCI_where; \
4901 VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \
4902 UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free); \
4903 zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy); \
4904MACRO_END
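/*
 *	Illustration: with "where" = W, W's successor N, and a copy chain
 *	F ... L (first through last entries), the macro splices the chain
 *	so the map list reads  ... W, F ... L, N ...  and then frees the
 *	now-empty copy header.
 */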
4905
4906/*
4907 * Routine: vm_map_copyout
4908 *
4909 * Description:
4910 * Copy out a copy chain ("copy") into newly-allocated
4911 * space in the destination map.
4912 *
4913 * If successful, consumes the copy object.
4914 * Otherwise, the caller is responsible for it.
4915 */
4916kern_return_t
4917vm_map_copyout(
4918 register vm_map_t dst_map,
4919 vm_offset_t *dst_addr, /* OUT */
4920 register vm_map_copy_t copy)
4921{
4922 vm_size_t size;
4923 vm_size_t adjustment;
4924 vm_offset_t start;
4925 vm_object_offset_t vm_copy_start;
4926 vm_map_entry_t last;
4927 register
4928 vm_map_entry_t entry;
4929
4930 /*
4931 * Check for null copy object.
4932 */
4933
4934 if (copy == VM_MAP_COPY_NULL) {
4935 *dst_addr = 0;
4936 return(KERN_SUCCESS);
4937 }
4938
4939 /*
4940 * Check for special copy object, created
4941 * by vm_map_copyin_object.
4942 */
4943
4944 if (copy->type == VM_MAP_COPY_OBJECT) {
4945 vm_object_t object = copy->cpy_object;
4946 kern_return_t kr;
4947 vm_object_offset_t offset;
4948
4949 offset = trunc_page_64(copy->offset);
4950 size = round_page(copy->size +
4951 (vm_size_t)(copy->offset - offset));
4952 *dst_addr = 0;
4953 kr = vm_map_enter(dst_map, dst_addr, size,
4954 (vm_offset_t) 0, TRUE,
4955 object, offset, FALSE,
4956 VM_PROT_DEFAULT, VM_PROT_ALL,
4957 VM_INHERIT_DEFAULT);
4958 if (kr != KERN_SUCCESS)
4959 return(kr);
4960 /* Account for non-pagealigned copy object */
4961 *dst_addr += (vm_offset_t)(copy->offset - offset);
4962 zfree(vm_map_copy_zone, (vm_offset_t) copy);
4963 return(KERN_SUCCESS);
4964 }
4965
4966 /*
4967 * Check for special kernel buffer allocated
4968 * by new_ipc_kmsg_copyin.
4969 */
4970
4971 if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
4972 return(vm_map_copyout_kernel_buffer(dst_map, dst_addr,
4973 copy, FALSE));
4974 }
4975
4976 /*
4977 * Find space for the data
4978 */
4979
4980 vm_copy_start = trunc_page_64(copy->offset);
4981 size = round_page((vm_size_t)copy->offset + copy->size)
4982 - vm_copy_start;
4983
4984 StartAgain: ;
4985
4986 vm_map_lock(dst_map);
4987 assert(first_free_is_valid(dst_map));
4988 start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
4989 vm_map_min(dst_map) : last->vme_end;
4990
4991 while (TRUE) {
4992 vm_map_entry_t next = last->vme_next;
4993 vm_offset_t end = start + size;
4994
4995 if ((end > dst_map->max_offset) || (end < start)) {
4996 if (dst_map->wait_for_space) {
4997 if (size <= (dst_map->max_offset - dst_map->min_offset)) {
4998 assert_wait((event_t) dst_map,
4999 THREAD_INTERRUPTIBLE);
5000 vm_map_unlock(dst_map);
5001 thread_block((void (*)(void))0);
5002 goto StartAgain;
5003 }
5004 }
5005 vm_map_unlock(dst_map);
5006 return(KERN_NO_SPACE);
5007 }
5008
5009 if ((next == vm_map_to_entry(dst_map)) ||
5010 (next->vme_start >= end))
5011 break;
5012
5013 last = next;
5014 start = last->vme_end;
5015 }
5016
5017 /*
5018 * Since we're going to just drop the map
5019 * entries from the copy into the destination
5020 * map, they must come from the same pool.
5021 */
5022
5023 if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
5024 /*
5025 * Mismatches occur when dealing with the default
5026 * pager.
5027 */
5028 zone_t old_zone;
5029 vm_map_entry_t next, new;
5030
5031 /*
5032 * Find the zone that the copies were allocated from
5033 */
5034 old_zone = (copy->cpy_hdr.entries_pageable)
5035 ? vm_map_entry_zone
5036 : vm_map_kentry_zone;
5037 entry = vm_map_copy_first_entry(copy);
5038
5039 /*
5040 * Reinitialize the copy so that vm_map_copy_entry_link
5041 * will work.
5042 */
5043 copy->cpy_hdr.nentries = 0;
5044 copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
5045 vm_map_copy_first_entry(copy) =
5046 vm_map_copy_last_entry(copy) =
5047 vm_map_copy_to_entry(copy);
5048
5049 /*
5050 * Copy each entry.
5051 */
5052 while (entry != vm_map_copy_to_entry(copy)) {
5053 new = vm_map_copy_entry_create(copy);
5054 vm_map_entry_copy_full(new, entry);
5055 new->use_pmap = FALSE; /* clr address space specifics */
5056 vm_map_copy_entry_link(copy,
5057 vm_map_copy_last_entry(copy),
5058 new);
5059 next = entry->vme_next;
5060 zfree(old_zone, (vm_offset_t) entry);
5061 entry = next;
5062 }
5063 }
5064
5065 /*
5066 * Adjust the addresses in the copy chain, and
5067 * reset the region attributes.
5068 */
5069
5070 adjustment = start - vm_copy_start;
5071 for (entry = vm_map_copy_first_entry(copy);
5072 entry != vm_map_copy_to_entry(copy);
5073 entry = entry->vme_next) {
5074 entry->vme_start += adjustment;
5075 entry->vme_end += adjustment;
5076
5077 entry->inheritance = VM_INHERIT_DEFAULT;
5078 entry->protection = VM_PROT_DEFAULT;
5079 entry->max_protection = VM_PROT_ALL;
5080 entry->behavior = VM_BEHAVIOR_DEFAULT;
5081
5082 /*
5083 * If the entry is now wired,
5084 * map the pages into the destination map.
5085 */
5086 if (entry->wired_count != 0) {
5087 register vm_offset_t va;
5088 vm_object_offset_t offset;
5089 register vm_object_t object;
5090
5091 object = entry->object.vm_object;
5092 offset = entry->offset;
5093 va = entry->vme_start;
5094
5095 pmap_pageable(dst_map->pmap,
5096 entry->vme_start,
5097 entry->vme_end,
5098 TRUE);
5099
5100 while (va < entry->vme_end) {
5101 register vm_page_t m;
5102
5103 /*
5104 * Look up the page in the object.
5105 * Assert that the page will be found in the
5106 * top object:
5107 * either
5108 * the object was newly created by
5109 * vm_object_copy_slowly, and has
5110 * copies of all of the pages from
5111 * the source object
5112 * or
5113 * the object was moved from the old
5114 * map entry; because the old map
5115 * entry was wired, all of the pages
5116 * were in the top-level object.
5117 * (XXX not true if we wire pages for
5118 * reading)
5119 */
5120 vm_object_lock(object);
5121 vm_object_paging_begin(object);
5122
5123 m = vm_page_lookup(object, offset);
5124 if (m == VM_PAGE_NULL || m->wire_count == 0 ||
5125 m->absent)
5126 panic("vm_map_copyout: wiring 0x%x", m);
5127
5128 m->busy = TRUE;
5129 vm_object_unlock(object);
5130
5131 PMAP_ENTER(dst_map->pmap, va, m,
5132 entry->protection, TRUE);
5133
5134 vm_object_lock(object);
5135 PAGE_WAKEUP_DONE(m);
5136 /* the page is wired, so we don't have to activate */
5137 vm_object_paging_end(object);
5138 vm_object_unlock(object);
5139
5140 offset += PAGE_SIZE_64;
5141 va += PAGE_SIZE;
5142 }
5143 }
5144 else if (size <= vm_map_aggressive_enter_max) {
5145
5146 register vm_offset_t va;
5147 vm_object_offset_t offset;
5148 register vm_object_t object;
5149 vm_prot_t prot;
5150
5151 object = entry->object.vm_object;
5152 if (object != VM_OBJECT_NULL) {
5153
5154 offset = entry->offset;
5155 va = entry->vme_start;
5156 while (va < entry->vme_end) {
5157 register vm_page_t m;
5158
5159 /*
5160 * Look up the page in the object.
5161 * Assert that the page will be found
5162 * in the top object if at all...
5163 */
5164 vm_object_lock(object);
5165 vm_object_paging_begin(object);
5166
5167 if (((m = vm_page_lookup(object,
5168 offset))
5169 != VM_PAGE_NULL) &&
5170 !m->busy && !m->fictitious &&
5171 !m->absent && !m->error) {
5172 m->busy = TRUE;
5173 vm_object_unlock(object);
5174
5175 /* honor cow obligations */
5176 prot = entry->protection;
5177 if (entry->needs_copy)
5178 prot &= ~VM_PROT_WRITE;
5179
5180 PMAP_ENTER(dst_map->pmap, va,
5181 m, prot, FALSE);
5182
5183 vm_object_lock(object);
5184 vm_page_lock_queues();
5185 if (!m->active && !m->inactive)
5186 vm_page_activate(m);
5187 vm_page_unlock_queues();
5188 PAGE_WAKEUP_DONE(m);
5189 }
5190 vm_object_paging_end(object);
5191 vm_object_unlock(object);
5192
5193 offset += PAGE_SIZE_64;
5194 va += PAGE_SIZE;
5195 }
5196 }
5197 }
5198 }
5199
5200 /*
5201 * Correct the page alignment for the result
5202 */
5203
5204 *dst_addr = start + (copy->offset - vm_copy_start);
5205
5206 /*
5207 * Update the hints and the map size
5208 */
5209
5210 SAVE_HINT(dst_map, vm_map_copy_last_entry(copy));
5211
5212 dst_map->size += size;
5213
5214 /*
5215 * Link in the copy
5216 */
5217
5218 vm_map_copy_insert(dst_map, last, copy);
5219
5220 vm_map_unlock(dst_map);
5221
5222 /*
5223 * XXX If wiring_required, call vm_map_pageable
5224 */
5225
5226 return(KERN_SUCCESS);
5227}
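/*
 * Illustrative sketch only (kept under #if 0, never compiled): the usual
 * pattern for consuming a copy object produced elsewhere (e.g. by
 * vm_map_copyin()) is to let vm_map_copyout() pick the address; the copy
 * is consumed on success and must be discarded by the caller on failure.
 */
#if 0
static kern_return_t
vm_map_copyout_example(
	vm_map_t	dst_map,
	vm_map_copy_t	copy,
	vm_offset_t	*addr)		/* OUT: where the data landed */
{
	kern_return_t	kr;

	kr = vm_map_copyout(dst_map, addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);
	return kr;
}
#endif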
5228
5229boolean_t vm_map_aggressive_enter; /* not used yet */
5230
5231
5232/*
5233 * Routine: vm_map_copyin
5234 *
5235 * Description:
5236 * Copy the specified region (src_addr, len) from the
5237 * source address space (src_map), possibly removing
5238 * the region from the source address space (src_destroy).
5239 *
5240 * Returns:
5241 * A vm_map_copy_t object (copy_result), suitable for
5242 * insertion into another address space (using vm_map_copyout),
5243 * copying over another address space region (using
5244 * vm_map_copy_overwrite). If the copy is unused, it
5245 * should be destroyed (using vm_map_copy_discard).
5246 *
5247 * In/out conditions:
5248 * The source map should not be locked on entry.
5249 */
5250
5251typedef struct submap_map {
5252 vm_map_t parent_map;
5253 vm_offset_t base_start;
5254 vm_offset_t base_end;
5255 struct submap_map *next;
5256} submap_map_t;
5257
5258kern_return_t
5259vm_map_copyin_common(
5260 vm_map_t src_map,
5261 vm_offset_t src_addr,
5262 vm_size_t len,
5263 boolean_t src_destroy,
5264 boolean_t src_volatile,
5265 vm_map_copy_t *copy_result, /* OUT */
5266 boolean_t use_maxprot)
5267{
5268 extern int msg_ool_size_small;
5269
5270 vm_map_entry_t tmp_entry; /* Result of last map lookup --
5271 * in multi-level lookup, this
5272 * entry contains the actual
5273 * vm_object/offset.
5274 */
5275 register
5276 vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */
5277
5278 vm_offset_t src_start; /* Start of current entry --
5279 * where copy is taking place now
5280 */
5281 vm_offset_t src_end; /* End of entire region to be
5282 * copied */
5283 vm_offset_t base_start; /* submap fields to save offsets */
5284 /* in original map */
5285 vm_offset_t base_end;
5286 vm_map_t base_map=src_map;
5287 vm_map_entry_t base_entry;
5288 boolean_t map_share=FALSE;
5289 submap_map_t *parent_maps = NULL;
5290
5291 register
5292 vm_map_copy_t copy; /* Resulting copy */
5293 vm_offset_t copy_addr;
5294
5295 /*
5296 * Check for copies of zero bytes.
5297 */
5298
5299 if (len == 0) {
5300 *copy_result = VM_MAP_COPY_NULL;
5301 return(KERN_SUCCESS);
5302 }
5303
5304 /*
5305 * If the copy is sufficiently small, use a kernel buffer instead
5306 * of making a virtual copy. The theory being that the cost of
5307 * setting up VM (and taking C-O-W faults) dominates the copy costs
5308 * for small regions.
5309 */
5310 if ((len < msg_ool_size_small) && !use_maxprot)
5311 return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
5312 src_destroy, copy_result);
5313
5314 /*
5315 * Compute start and end of region
5316 */
5317
5318 src_start = trunc_page(src_addr);
5319 src_end = round_page(src_addr + len);
5320
5321 XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0);
5322
5323 /*
5324 * Check that the end address doesn't overflow
5325 */
5326
5327 if (src_end <= src_start)
5328 if ((src_end < src_start) || (src_start != 0))
5329 return(KERN_INVALID_ADDRESS);
5330
5331 /*
5332 * Allocate a header element for the list.
5333 *
5334 * Use the start and end in the header to
5335 * remember the endpoints prior to rounding.
5336 */
5337
5338 copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
5339 vm_map_copy_first_entry(copy) =
5340 vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
5341 copy->type = VM_MAP_COPY_ENTRY_LIST;
5342 copy->cpy_hdr.nentries = 0;
5343 copy->cpy_hdr.entries_pageable = TRUE;
5344
5345 copy->offset = src_addr;
5346 copy->size = len;
5347
5348 new_entry = vm_map_copy_entry_create(copy);
5349
5350#define RETURN(x) \
5351 MACRO_BEGIN \
5352 vm_map_unlock(src_map); \
5353 if (new_entry != VM_MAP_ENTRY_NULL) \
5354 vm_map_copy_entry_dispose(copy,new_entry); \
5355 vm_map_copy_discard(copy); \
5356 { \
5357 submap_map_t *ptr; \
5358 \
5359 for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
5360 parent_maps=parent_maps->next; \
5361 kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \
5362 } \
5363 } \
5364 MACRO_RETURN(x); \
5365 MACRO_END
5366
5367 /*
5368 * Find the beginning of the region.
5369 */
5370
5371 vm_map_lock(src_map);
5372
5373 if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
5374 RETURN(KERN_INVALID_ADDRESS);
5375 if(!tmp_entry->is_sub_map) {
5376 vm_map_clip_start(src_map, tmp_entry, src_start);
5377 }
5378 /* set for later submap fix-up */
5379 copy_addr = src_start;
5380
5381 /*
5382 * Go through entries until we get to the end.
5383 */
5384
5385 while (TRUE) {
5386 register
5387 vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */
5388 vm_size_t src_size; /* Size of source
5389 * map entry (in both
5390 * maps)
5391 */
5392
5393 register
5394 vm_object_t src_object; /* Object to copy */
5395 vm_object_offset_t src_offset;
5396
5397 boolean_t src_needs_copy; /* Should source map
5398 * be made read-only
5399 * for copy-on-write?
5400 */
5401
5402 boolean_t new_entry_needs_copy; /* Will new entry be COW? */
5403
5404 boolean_t was_wired; /* Was source wired? */
5405 vm_map_version_t version; /* Version before locks
5406 * dropped to make copy
5407 */
5408 kern_return_t result; /* Return value from
5409 * copy_strategically.
5410 */
5411 while(tmp_entry->is_sub_map) {
5412 vm_size_t submap_len;
5413 submap_map_t *ptr;
5414
5415 ptr = (submap_map_t *)kalloc(sizeof(submap_map_t));
5416 ptr->next = parent_maps;
5417 parent_maps = ptr;
5418 ptr->parent_map = src_map;
5419 ptr->base_start = src_start;
5420 ptr->base_end = src_end;
5421 submap_len = tmp_entry->vme_end - src_start;
5422 if(submap_len > (src_end-src_start))
5423 submap_len = src_end-src_start;
5424 ptr->base_start += submap_len;
5425
5426 src_start -= tmp_entry->vme_start;
5427 src_start += tmp_entry->offset;
5428 src_end = src_start + submap_len;
5429 src_map = tmp_entry->object.sub_map;
5430 vm_map_lock(src_map);
5431 vm_map_unlock(ptr->parent_map);
5432 if (!vm_map_lookup_entry(
5433 src_map, src_start, &tmp_entry))
5434 RETURN(KERN_INVALID_ADDRESS);
5435 map_share = TRUE;
5436 if(!tmp_entry->is_sub_map)
5437 vm_map_clip_start(src_map, tmp_entry, src_start);
5438 src_entry = tmp_entry;
5439 }
5440 if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) &&
5441 (tmp_entry->object.vm_object->phys_contiguous)) {
5442			/* This is not, and cannot be, supported for now: */
5443 /* we need a description of the caching mode */
5444 /* reflected in the object before we can */
5445 /* support copyin, and then the support will */
5446 /* be for direct copy */
5447 RETURN(KERN_PROTECTION_FAILURE);
5448 }
5449 /*
5450 * Create a new address map entry to hold the result.
5451 * Fill in the fields from the appropriate source entries.
5452 * We must unlock the source map to do this if we need
5453 * to allocate a map entry.
5454 */
5455 if (new_entry == VM_MAP_ENTRY_NULL) {
5456 version.main_timestamp = src_map->timestamp;
5457 vm_map_unlock(src_map);
5458
5459 new_entry = vm_map_copy_entry_create(copy);
5460
5461 vm_map_lock(src_map);
5462 if ((version.main_timestamp + 1) != src_map->timestamp) {
5463 if (!vm_map_lookup_entry(src_map, src_start,
5464 &tmp_entry)) {
5465 RETURN(KERN_INVALID_ADDRESS);
5466 }
5467 vm_map_clip_start(src_map, tmp_entry, src_start);
5468 continue; /* restart w/ new tmp_entry */
5469 }
5470 }
5471
5472 /*
5473 * Verify that the region can be read.
5474 */
5475 if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
5476 !use_maxprot) ||
5477 (src_entry->max_protection & VM_PROT_READ) == 0)
5478 RETURN(KERN_PROTECTION_FAILURE);
5479
5480 /*
5481 * Clip against the endpoints of the entire region.
5482 */
5483
5484 vm_map_clip_end(src_map, src_entry, src_end);
5485
5486 src_size = src_entry->vme_end - src_start;
5487 src_object = src_entry->object.vm_object;
5488 src_offset = src_entry->offset;
5489 was_wired = (src_entry->wired_count != 0);
5490
5491 vm_map_entry_copy(new_entry, src_entry);
5492 new_entry->use_pmap = FALSE; /* clr address space specifics */
5493
5494 /*
5495 * Attempt non-blocking copy-on-write optimizations.
5496 */
5497
5498 if (src_destroy &&
5499 (src_object == VM_OBJECT_NULL ||
5500 (src_object->internal && !src_object->true_share
5501 && !map_share))) {
5502 /*
5503 * If we are destroying the source, and the object
5504 * is internal, we can move the object reference
5505 * from the source to the copy. The copy is
5506 * copy-on-write only if the source is.
5507 * We make another reference to the object, because
5508 * destroying the source entry will deallocate it.
5509 */
5510 vm_object_reference(src_object);
5511
5512 /*
5513			 * Copy is always unwired; vm_map_entry_copy()
5514			 * already set its wired count to zero.
5515 */
5516
5517 goto CopySuccessful;
5518 }
5519
5520
5521RestartCopy:
5522 XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
5523 src_object, new_entry, new_entry->object.vm_object,
5524 was_wired, 0);
5525 if (!was_wired &&
5526 vm_object_copy_quickly(
5527 &new_entry->object.vm_object,
5528 src_offset,
5529 src_size,
5530 &src_needs_copy,
5531 &new_entry_needs_copy)) {
5532
5533 new_entry->needs_copy = new_entry_needs_copy;
5534
5535 /*
5536 * Handle copy-on-write obligations
5537 */
5538
5539 if (src_needs_copy && !tmp_entry->needs_copy) {
5540 if (tmp_entry->is_shared ||
5541 tmp_entry->object.vm_object->true_share ||
5542 map_share) {
5543				vm_map_unlock(src_map);
5544 new_entry->object.vm_object =
5545 vm_object_copy_delayed(
5546 src_object,
5547 src_offset,
5548 src_size);
5549 /* dec ref gained in copy_quickly */
5550 vm_object_lock(src_object);
5551 src_object->ref_count--;
5552 assert(src_object->ref_count > 0);
5553 vm_object_res_deallocate(src_object);
5554 vm_object_unlock(src_object);
5555 vm_map_lock(src_map);
5556 /*
5557 * it turns out that we have
5558 * finished our copy. No matter
5559 * what the state of the map
5560 * we will lock it again here
5561 * knowing that if there is
5562 * additional data to copy
5563 * it will be checked at
5564 * the top of the loop
5565 *
5566 * Don't do timestamp check
5567 */
5568
5569 } else {
5570 vm_object_pmap_protect(
5571 src_object,
5572 src_offset,
5573 src_size,
5574 (src_entry->is_shared ?
5575 PMAP_NULL
5576 : src_map->pmap),
5577 src_entry->vme_start,
5578 src_entry->protection &
5579 ~VM_PROT_WRITE);
5580
5581 tmp_entry->needs_copy = TRUE;
5582 }
5583 }
5584
5585 /*
5586 * The map has never been unlocked, so it's safe
5587 * to move to the next entry rather than doing
5588 * another lookup.
5589 */
5590
5591 goto CopySuccessful;
5592 }
5593
5594 new_entry->needs_copy = FALSE;
5595
5596 /*
5597 * Take an object reference, so that we may
5598 * release the map lock(s).
5599 */
5600
5601 assert(src_object != VM_OBJECT_NULL);
5602 vm_object_reference(src_object);
5603
5604 /*
5605 * Record the timestamp for later verification.
5606 * Unlock the map.
5607 */
5608
5609 version.main_timestamp = src_map->timestamp;
5610 vm_map_unlock(src_map);
5611
5612 /*
5613 * Perform the copy
5614 */
5615
5616 if (was_wired) {
5617 vm_object_lock(src_object);
5618 result = vm_object_copy_slowly(
5619 src_object,
5620 src_offset,
5621 src_size,
5622 THREAD_UNINT,
5623 &new_entry->object.vm_object);
5624 new_entry->offset = 0;
5625 new_entry->needs_copy = FALSE;
5626 } else {
5627 result = vm_object_copy_strategically(src_object,
5628 src_offset,
5629 src_size,
5630 &new_entry->object.vm_object,
5631 &new_entry->offset,
5632 &new_entry_needs_copy);
5633
5634 new_entry->needs_copy = new_entry_needs_copy;
5635
5636 }
5637
5638 if (result != KERN_SUCCESS &&
5639 result != KERN_MEMORY_RESTART_COPY) {
5640 vm_map_lock(src_map);
5641 RETURN(result);
5642 }
5643
5644 /*
5645 * Throw away the extra reference
5646 */
5647
5648 vm_object_deallocate(src_object);
5649
5650 /*
5651 * Verify that the map has not substantially
5652 * changed while the copy was being made.
5653 */
5654
5655 vm_map_lock(src_map); /* Increments timestamp once! */
5656
5657 if ((version.main_timestamp + 1) == src_map->timestamp)
5658 goto VerificationSuccessful;
5659
5660 /*
5661 * Simple version comparison failed.
5662 *
5663 * Retry the lookup and verify that the
5664 * same object/offset are still present.
5665 *
5666 * [Note: a memory manager that colludes with
5667 * the calling task can detect that we have
5668 * cheated. While the map was unlocked, the
5669 * mapping could have been changed and restored.]
5670 */
5671
5672 if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
5673 RETURN(KERN_INVALID_ADDRESS);
5674 }
5675
5676 src_entry = tmp_entry;
5677 vm_map_clip_start(src_map, src_entry, src_start);
5678
5679		if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
5680		     !use_maxprot) ||
5681		    (src_entry->max_protection & VM_PROT_READ) == 0)
5682 goto VerificationFailed;
5683
5684 if (src_entry->vme_end < new_entry->vme_end)
5685 src_size = (new_entry->vme_end = src_entry->vme_end) - src_start;
5686
5687 if ((src_entry->object.vm_object != src_object) ||
5688 (src_entry->offset != src_offset) ) {
5689
5690 /*
5691 * Verification failed.
5692 *
5693 * Start over with this top-level entry.
5694 */
5695
5696 VerificationFailed: ;
5697
5698 vm_object_deallocate(new_entry->object.vm_object);
5699 tmp_entry = src_entry;
5700 continue;
5701 }
5702
5703 /*
5704 * Verification succeeded.
5705 */
5706
5707 VerificationSuccessful: ;
5708
5709 if (result == KERN_MEMORY_RESTART_COPY)
5710 goto RestartCopy;
5711
5712 /*
5713 * Copy succeeded.
5714 */
5715
5716 CopySuccessful: ;
5717
5718 /*
5719 * Link in the new copy entry.
5720 */
5721
5722 vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
5723 new_entry);
5724
5725 /*
5726 * Determine whether the entire region
5727 * has been copied.
5728 */
5729 src_start = new_entry->vme_end;
5730 new_entry = VM_MAP_ENTRY_NULL;
5731 while ((src_start >= src_end) && (src_end != 0)) {
5732 if (src_map != base_map) {
5733 submap_map_t *ptr;
5734
5735 ptr = parent_maps;
5736 assert(ptr != NULL);
5737 parent_maps = parent_maps->next;
5738 vm_map_lock(ptr->parent_map);
5739 vm_map_unlock(src_map);
5740 src_map = ptr->parent_map;
5741 src_start = ptr->base_start;
5742 src_end = ptr->base_end;
5743 if ((src_end > src_start) &&
5744 !vm_map_lookup_entry(
5745 src_map, src_start, &tmp_entry))
5746 RETURN(KERN_INVALID_ADDRESS);
5747 kfree((vm_offset_t)ptr, sizeof(submap_map_t));
5748 if(parent_maps == NULL)
5749 map_share = FALSE;
5750 src_entry = tmp_entry->vme_prev;
5751 } else
5752 break;
5753 }
5754 if ((src_start >= src_end) && (src_end != 0))
5755 break;
5756
5757 /*
5758 * Verify that there are no gaps in the region
5759 */
5760
5761 tmp_entry = src_entry->vme_next;
5762 if ((tmp_entry->vme_start != src_start) ||
5763 (tmp_entry == vm_map_to_entry(src_map)))
5764 RETURN(KERN_INVALID_ADDRESS);
5765 }
5766
5767 /*
5768 * If the source should be destroyed, do it now, since the
5769 * copy was successful.
5770 */
5771 if (src_destroy) {
5772 (void) vm_map_delete(src_map,
5773 trunc_page(src_addr),
5774 src_end,
5775 (src_map == kernel_map) ?
5776 VM_MAP_REMOVE_KUNWIRE :
5777 VM_MAP_NO_FLAGS);
5778 }
5779
5780 vm_map_unlock(src_map);
5781
5782 /* Fix-up start and end points in copy. This is necessary */
5783 /* when the various entries in the copy object were picked */
5784 /* up from different sub-maps */
5785
5786 tmp_entry = vm_map_copy_first_entry(copy);
5787 while (tmp_entry != vm_map_copy_to_entry(copy)) {
5788 tmp_entry->vme_end = copy_addr +
5789 (tmp_entry->vme_end - tmp_entry->vme_start);
5790 tmp_entry->vme_start = copy_addr;
5791 copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
5792 tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
5793 }
5794
5795 *copy_result = copy;
5796 return(KERN_SUCCESS);
5797
5798#undef RETURN
5799}
5800
5801/*
5802 * vm_map_copyin_object:
5803 *
5804 * Create a copy object from an object.
5805 * Our caller donates an object reference.
5806 */
5807
5808kern_return_t
5809vm_map_copyin_object(
5810 vm_object_t object,
5811 vm_object_offset_t offset, /* offset of region in object */
5812 vm_object_size_t size, /* size of region in object */
5813 vm_map_copy_t *copy_result) /* OUT */
5814{
5815 vm_map_copy_t copy; /* Resulting copy */
5816
5817 /*
5818 * We drop the object into a special copy object
5819 * that contains the object directly.
5820 */
5821
5822 copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
5823 copy->type = VM_MAP_COPY_OBJECT;
5824 copy->cpy_object = object;
5825 copy->cpy_index = 0;
5826 copy->offset = offset;
5827 copy->size = size;
5828
5829 *copy_result = copy;
5830 return(KERN_SUCCESS);
5831}
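/*
 * Illustrative sketch only (kept under #if 0, never compiled): wrapping an
 * existing object in a copy suitable for vm_map_copyout().  The object
 * reference held by the caller is donated to the copy, so on failure the
 * discard releases it.
 */
#if 0
static kern_return_t
vm_map_copyin_object_example(
	vm_map_t		dst_map,
	vm_object_t		object,		/* caller donates a reference */
	vm_object_size_t	size,
	vm_offset_t		*addr)
{
	vm_map_copy_t	copy;
	kern_return_t	kr;

	kr = vm_map_copyin_object(object, 0, size, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copyout(dst_map, addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);
	return kr;
}
#endif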
5832
5833void
5834vm_map_fork_share(
5835 vm_map_t old_map,
5836 vm_map_entry_t old_entry,
5837 vm_map_t new_map)
5838{
5839 vm_object_t object;
5840 vm_map_entry_t new_entry;
5841 kern_return_t result;
5842
5843 /*
5844 * New sharing code. New map entry
5845 * references original object. Internal
5846 * objects use asynchronous copy algorithm for
5847 * future copies. First make sure we have
5848 * the right object. If we need a shadow,
5849 * or someone else already has one, then
5850 * make a new shadow and share it.
5851 */
5852
5853 object = old_entry->object.vm_object;
5854 if (old_entry->is_sub_map) {
5855 assert(old_entry->wired_count == 0);
5856#ifndef i386
5857 if(old_entry->use_pmap) {
5858 result = pmap_nest(new_map->pmap,
5859 (old_entry->object.sub_map)->pmap,
5860 old_entry->vme_start,
5861 old_entry->vme_end - old_entry->vme_start);
5862 if(result)
5863 panic("vm_map_fork_share: pmap_nest failed!");
5864 }
5865#endif
5866 } else if (object == VM_OBJECT_NULL) {
5867 object = vm_object_allocate((vm_size_t)(old_entry->vme_end -
5868 old_entry->vme_start));
5869 old_entry->offset = 0;
5870 old_entry->object.vm_object = object;
5871 assert(!old_entry->needs_copy);
5872 } else if (object->copy_strategy !=
5873 MEMORY_OBJECT_COPY_SYMMETRIC) {
5874
5875 /*
5876 * We are already using an asymmetric
5877 * copy, and therefore we already have
5878 * the right object.
5879 */
5880
5881 assert(! old_entry->needs_copy);
5882 }
5883 else if (old_entry->needs_copy || /* case 1 */
5884 object->shadowed || /* case 2 */
5885 (!object->true_share && /* case 3 */
5886 !old_entry->is_shared &&
5887 (object->size >
5888 (vm_size_t)(old_entry->vme_end -
5889 old_entry->vme_start)))) {
5890
5891 /*
5892 * We need to create a shadow.
5893 * There are three cases here.
5894 * In the first case, we need to
5895 * complete a deferred symmetrical
5896 * copy that we participated in.
5897 * In the second and third cases,
5898 * we need to create the shadow so
5899 * that changes that we make to the
5900 * object do not interfere with
5901 * any symmetrical copies which
5902			 * have occurred (case 2) or which
5903 * might occur (case 3).
5904 *
5905 * The first case is when we had
5906 * deferred shadow object creation
5907 * via the entry->needs_copy mechanism.
5908 * This mechanism only works when
5909 * only one entry points to the source
5910 * object, and we are about to create
5911 * a second entry pointing to the
5912 * same object. The problem is that
5913 * there is no way of mapping from
5914 * an object to the entries pointing
5915 * to it. (Deferred shadow creation
5916			 * works with one entry because it occurs
5917 * at fault time, and we walk from the
5918 * entry to the object when handling
5919 * the fault.)
5920 *
5921 * The second case is when the object
5922 * to be shared has already been copied
5923 * with a symmetric copy, but we point
5924 * directly to the object without
5925 * needs_copy set in our entry. (This
5926 * can happen because different ranges
5927 * of an object can be pointed to by
5928 * different entries. In particular,
5929 * a single entry pointing to an object
5930 * can be split by a call to vm_inherit,
5931 * which, combined with task_create, can
5932 * result in the different entries
5933 * having different needs_copy values.)
5934 * The shadowed flag in the object allows
5935 * us to detect this case. The problem
5936 * with this case is that if this object
5937 * has or will have shadows, then we
5938 * must not perform an asymmetric copy
5939 * of this object, since such a copy
5940 * allows the object to be changed, which
5941 * will break the previous symmetrical
5942 * copies (which rely upon the object
5943 * not changing). In a sense, the shadowed
5944 * flag says "don't change this object".
5945 * We fix this by creating a shadow
5946 * object for this object, and sharing
5947 * that. This works because we are free
5948 * to change the shadow object (and thus
5949 * to use an asymmetric copy strategy);
5950 * this is also semantically correct,
5951 * since this object is temporary, and
5952 * therefore a copy of the object is
5953 * as good as the object itself. (This
5954 * is not true for permanent objects,
5955 * since the pager needs to see changes,
5956 * which won't happen if the changes
5957 * are made to a copy.)
5958 *
5959 * The third case is when the object
5960 * to be shared has parts sticking
5961 * outside of the entry we're working
5962 * with, and thus may in the future
5963 * be subject to a symmetrical copy.
5964 * (This is a preemptive version of
5965 * case 2.)
5966 */
5967
5968 assert(!(object->shadowed && old_entry->is_shared));
5969 vm_object_shadow(&old_entry->object.vm_object,
5970 &old_entry->offset,
5971 (vm_size_t) (old_entry->vme_end -
5972 old_entry->vme_start));
5973
5974 /*
5975 * If we're making a shadow for other than
5976 * copy on write reasons, then we have
5977 * to remove write permission.
5978 */
5979
5980/* CDY FIX this! page_protect! */
5981 if (!old_entry->needs_copy &&
5982 (old_entry->protection & VM_PROT_WRITE)) {
5983 if(old_entry->is_sub_map && old_entry->use_pmap) {
5984 pmap_protect(old_entry->object.sub_map->pmap,
5985 old_entry->vme_start,
5986 old_entry->vme_end,
5987 old_entry->protection & ~VM_PROT_WRITE);
5988 } else {
5989 pmap_protect(vm_map_pmap(old_map),
5990 old_entry->vme_start,
5991 old_entry->vme_end,
5992 old_entry->protection & ~VM_PROT_WRITE);
5993 }
5994 }
5995
5996 old_entry->needs_copy = FALSE;
5997 object = old_entry->object.vm_object;
5998 }
5999
6000 /*
6001 * If object was using a symmetric copy strategy,
6002 * change its copy strategy to the default
6003 * asymmetric copy strategy, which is copy_delay
6004 * in the non-norma case and copy_call in the
6005 * norma case. Bump the reference count for the
6006 * new entry.
6007 */
6008
6009 if(old_entry->is_sub_map) {
6010 vm_map_lock(old_entry->object.sub_map);
6011 vm_map_reference(old_entry->object.sub_map);
6012 vm_map_unlock(old_entry->object.sub_map);
6013 } else {
6014 vm_object_lock(object);
6015 object->ref_count++;
6016 vm_object_res_reference(object);
6017 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
6018 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
6019 }
6020 vm_object_unlock(object);
6021 }
6022
6023 /*
6024 * Clone the entry, using object ref from above.
6025 * Mark both entries as shared.
6026 */
6027
6028 new_entry = vm_map_entry_create(new_map);
6029 vm_map_entry_copy(new_entry, old_entry);
6030 old_entry->is_shared = TRUE;
6031 new_entry->is_shared = TRUE;
6032
6033 /*
6034 * Insert the entry into the new map -- we
6035 * know we're inserting at the end of the new
6036 * map.
6037 */
6038
6039 vm_map_entry_link(new_map, vm_map_last_entry(new_map), new_entry);
6040
6041 /*
6042 * Update the physical map
6043 */
6044
6045 if (old_entry->is_sub_map) {
6046 /* Bill Angell pmap support goes here */
6047 } else {
6048 pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start,
6049 old_entry->vme_end - old_entry->vme_start,
6050 old_entry->vme_start);
6051 }
6052}
6053
6054boolean_t
6055vm_map_fork_copy(
6056 vm_map_t old_map,
6057 vm_map_entry_t *old_entry_p,
6058 vm_map_t new_map)
6059{
6060 vm_map_entry_t old_entry = *old_entry_p;
6061 vm_size_t entry_size = old_entry->vme_end - old_entry->vme_start;
6062 vm_offset_t start = old_entry->vme_start;
6063 vm_map_copy_t copy;
6064 vm_map_entry_t last = vm_map_last_entry(new_map);
6065
6066 vm_map_unlock(old_map);
6067 /*
6068 * Use maxprot version of copyin because we
6069 * care about whether this memory can ever
6070 * be accessed, not just whether it's accessible
6071 * right now.
6072 */
6073 if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
6074 != KERN_SUCCESS) {
6075 /*
6076 * The map might have changed while it
6077 * was unlocked, check it again. Skip
6078 * any blank space or permanently
6079 * unreadable region.
6080 */
6081 vm_map_lock(old_map);
6082 if (!vm_map_lookup_entry(old_map, start, &last) ||
6083		    (last->max_protection & VM_PROT_READ) ==
6084		     VM_PROT_NONE) {
6085 last = last->vme_next;
6086 }
6087 *old_entry_p = last;
6088
6089 /*
6090 * XXX For some error returns, want to
6091 * XXX skip to the next element. Note
6092 * that INVALID_ADDRESS and
6093 * PROTECTION_FAILURE are handled above.
6094 */
6095
6096 return FALSE;
6097 }
6098
6099 /*
6100 * Insert the copy into the new map
6101 */
6102
6103 vm_map_copy_insert(new_map, last, copy);
6104
6105 /*
6106 * Pick up the traversal at the end of
6107 * the copied region.
6108 */
6109
6110 vm_map_lock(old_map);
6111 start += entry_size;
6112 if (! vm_map_lookup_entry(old_map, start, &last)) {
6113 last = last->vme_next;
6114 } else {
6115 vm_map_clip_start(old_map, last, start);
6116 }
6117 *old_entry_p = last;
6118
6119 return TRUE;
6120}
6121
6122/*
6123 * vm_map_fork:
6124 *
6125 * Create and return a new map based on the old
6126 * map, according to the inheritance values on the
6127 * regions in that map.
6128 *
6129 * The source map must not be locked.
6130 */
6131vm_map_t
6132vm_map_fork(
6133 vm_map_t old_map)
6134{
6135 pmap_t new_pmap = pmap_create((vm_size_t) 0);
6136 vm_map_t new_map;
6137 vm_map_entry_t old_entry;
6138 vm_size_t new_size = 0, entry_size;
6139 vm_map_entry_t new_entry;
6140 boolean_t src_needs_copy;
6141 boolean_t new_entry_needs_copy;
6142
6143 vm_map_reference_swap(old_map);
6144 vm_map_lock(old_map);
6145
6146 new_map = vm_map_create(new_pmap,
6147 old_map->min_offset,
6148 old_map->max_offset,
6149 old_map->hdr.entries_pageable);
6150
6151 for (
6152 old_entry = vm_map_first_entry(old_map);
6153 old_entry != vm_map_to_entry(old_map);
6154 ) {
6155
6156 entry_size = old_entry->vme_end - old_entry->vme_start;
6157
6158 switch (old_entry->inheritance) {
6159 case VM_INHERIT_NONE:
6160 break;
6161
6162 case VM_INHERIT_SHARE:
6163 vm_map_fork_share(old_map, old_entry, new_map);
6164 new_size += entry_size;
6165 break;
6166
6167 case VM_INHERIT_COPY:
6168
6169 /*
6170 * Inline the copy_quickly case;
6171 * upon failure, fall back on call
6172 * to vm_map_fork_copy.
6173 */
6174
6175 if(old_entry->is_sub_map)
6176 break;
6177 if (old_entry->wired_count != 0) {
6178 goto slow_vm_map_fork_copy;
6179 }
6180
6181 new_entry = vm_map_entry_create(new_map);
6182 vm_map_entry_copy(new_entry, old_entry);
6183 /* clear address space specifics */
6184 new_entry->use_pmap = FALSE;
6185
6186 if (! vm_object_copy_quickly(
6187 &new_entry->object.vm_object,
6188 old_entry->offset,
6189 (old_entry->vme_end -
6190 old_entry->vme_start),
6191 &src_needs_copy,
6192 &new_entry_needs_copy)) {
6193 vm_map_entry_dispose(new_map, new_entry);
6194 goto slow_vm_map_fork_copy;
6195 }
6196
6197 /*
6198 * Handle copy-on-write obligations
6199 */
6200
6201 if (src_needs_copy && !old_entry->needs_copy) {
6202 vm_object_pmap_protect(
6203 old_entry->object.vm_object,
6204 old_entry->offset,
6205 (old_entry->vme_end -
6206 old_entry->vme_start),
6207 ((old_entry->is_shared
6208 || old_entry->is_sub_map)
6209 ? PMAP_NULL :
6210 old_map->pmap),
6211 old_entry->vme_start,
6212 old_entry->protection & ~VM_PROT_WRITE);
6213
6214 old_entry->needs_copy = TRUE;
6215 }
6216 new_entry->needs_copy = new_entry_needs_copy;
6217
6218 /*
6219 * Insert the entry at the end
6220 * of the map.
6221 */
6222
6223 vm_map_entry_link(new_map, vm_map_last_entry(new_map),
6224 new_entry);
6225 new_size += entry_size;
6226 break;
6227
6228 slow_vm_map_fork_copy:
6229 if (vm_map_fork_copy(old_map, &old_entry, new_map)) {
6230 new_size += entry_size;
6231 }
6232 continue;
6233 }
6234 old_entry = old_entry->vme_next;
6235 }
6236
6237 new_map->size = new_size;
6238 vm_map_unlock(old_map);
6239 vm_map_deallocate(old_map);
6240
6241 return(new_map);
6242}
6243
6244
6245/*
6246 * vm_map_lookup_locked:
6247 *
6248 * Finds the VM object, offset, and
6249 * protection for a given virtual address in the
6250 * specified map, assuming a page fault of the
6251 * type specified.
6252 *
6253 * Returns the (object, offset, protection) for
6254 * this address, whether it is wired down, and whether
6255 * this map has the only reference to the data in question.
6256 * In order to later verify this lookup, a "version"
6257 * is returned.
6258 *
6259 * The map MUST be locked by the caller and WILL be
6260 * locked on exit. In order to guarantee the
6261 * existence of the returned object, it is returned
6262 * locked.
6263 *
6264 * If a lookup is requested with "write protection"
6265 * specified, the map may be changed to perform virtual
6266 * copying operations, although the data referenced will
6267 * remain the same.
6268 */
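/*
 * Illustrative sketch (not part of this file; the real consumer is
 * vm_fault()): a caller takes the map read lock, performs the lookup,
 * and then pages in from the returned (object, offset).  All names
 * other than the routines called are hypothetical, and the error and
 * unlock sequences are elided.
 *
 *	vm_map_version_t	version;
 *	vm_object_t		object;
 *	vm_object_offset_t	offset, lo_offset, hi_offset;
 *	vm_prot_t		prot;
 *	boolean_t		wired;
 *	int			behavior;
 *	vm_map_t		real_map;
 *	kern_return_t		kr;
 *
 *	vm_map_lock_read(map);
 *	kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version,
 *		&object, &offset, &prot, &wired, &behavior,
 *		&lo_offset, &hi_offset, &real_map);
 *	(on KERN_SUCCESS the object comes back locked and the map is
 *	 still locked; the version can be re-checked later with
 *	 vm_map_verify() once the locks have been dropped)
 */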
6269kern_return_t
6270vm_map_lookup_locked(
6271 vm_map_t *var_map, /* IN/OUT */
6272 register vm_offset_t vaddr,
6273 register vm_prot_t fault_type,
6274 vm_map_version_t *out_version, /* OUT */
6275 vm_object_t *object, /* OUT */
6276 vm_object_offset_t *offset, /* OUT */
6277 vm_prot_t *out_prot, /* OUT */
6278 boolean_t *wired, /* OUT */
6279 int *behavior, /* OUT */
6280 vm_object_offset_t *lo_offset, /* OUT */
6281 vm_object_offset_t *hi_offset, /* OUT */
6282 vm_map_t *pmap_map)
6283{
6284 vm_map_entry_t entry;
6285 register vm_map_t map = *var_map;
6286 vm_map_t old_map = *var_map;
6287 vm_map_t cow_sub_map_parent = VM_MAP_NULL;
6288 vm_offset_t cow_parent_vaddr;
6289 vm_offset_t old_start;
6290 vm_offset_t old_end;
6291 register vm_prot_t prot;
6292
6293 *pmap_map = map;
6294 RetryLookup: ;
6295
6296 /*
6297 * If the map has an interesting hint, try it before calling
6298 * full blown lookup routine.
6299 */
6300
6301 mutex_lock(&map->s_lock);
6302 entry = map->hint;
6303 mutex_unlock(&map->s_lock);
6304
6305 if ((entry == vm_map_to_entry(map)) ||
6306 (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
6307 vm_map_entry_t tmp_entry;
6308
6309 /*
6310 * Entry was either not a valid hint, or the vaddr
6311 * was not contained in the entry, so do a full lookup.
6312 */
6313 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
6314 if((cow_sub_map_parent) && (cow_sub_map_parent != map))
6315 vm_map_unlock(cow_sub_map_parent);
6316 if((*pmap_map != map)
6317 && (*pmap_map != cow_sub_map_parent))
6318 vm_map_unlock(*pmap_map);
6319 return KERN_INVALID_ADDRESS;
6320 }
6321
6322 entry = tmp_entry;
6323 }
6324 if(map == old_map) {
6325 old_start = entry->vme_start;
6326 old_end = entry->vme_end;
6327 }
6328
6329 /*
6330 * Handle submaps. Drop lock on upper map, submap is
6331 * returned locked.
6332 */
6333
6334submap_recurse:
6335 if (entry->is_sub_map) {
6336 vm_offset_t local_vaddr;
6337 vm_offset_t end_delta;
6338 vm_offset_t start_delta;
6339 vm_offset_t object_start_delta;
6340 vm_map_entry_t submap_entry;
6341 boolean_t mapped_needs_copy=FALSE;
6342
6343 local_vaddr = vaddr;
6344
6345 if ((!entry->needs_copy) && (entry->use_pmap)) {
6346 /* if pmap_map equals map we unlock below */
6347 if ((*pmap_map != map) &&
6348 (*pmap_map != cow_sub_map_parent))
6349 vm_map_unlock(*pmap_map);
6350 *pmap_map = entry->object.sub_map;
6351 }
6352
6353 if(entry->needs_copy) {
6354 if (!mapped_needs_copy) {
6355 if (vm_map_lock_read_to_write(map)) {
6356 vm_map_lock_read(map);
6357 if(*pmap_map == entry->object.sub_map)
6358 *pmap_map = map;
6359 goto RetryLookup;
6360 }
6361 vm_map_lock_read(entry->object.sub_map);
6362 cow_sub_map_parent = map;
6363 /* reset base to map before cow object */
6364 /* this is the map which will accept */
6365 /* the new cow object */
6366 old_start = entry->vme_start;
6367 old_end = entry->vme_end;
6368 cow_parent_vaddr = vaddr;
6369 mapped_needs_copy = TRUE;
6370 } else {
6371 vm_map_lock_read(entry->object.sub_map);
6372 if((cow_sub_map_parent != map) &&
6373 (*pmap_map != map))
6374 vm_map_unlock(map);
6375 }
6376 } else {
6377 vm_map_lock_read(entry->object.sub_map);
6378 /* leave map locked if it is a target */
6379 /* cow sub_map above; otherwise, just */
6380 /* follow the maps down to the object. */
6381 /* here we unlock knowing we are not */
6382 /* revisiting the map. */
6383 if((*pmap_map != map) && (map != cow_sub_map_parent))
6384 vm_map_unlock_read(map);
6385 }
6386
6387 *var_map = map = entry->object.sub_map;
6388
6389 /* calculate the offset in the submap for vaddr */
6390 local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;
6391
6392RetrySubMap:
6393 if(!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) {
6394 if((cow_sub_map_parent) && (cow_sub_map_parent != map)){
6395 vm_map_unlock(cow_sub_map_parent);
6396 }
6397 if((*pmap_map != map)
6398 && (*pmap_map != cow_sub_map_parent)) {
6399 vm_map_unlock(*pmap_map);
6400 }
6401 *pmap_map = map;
6402 return KERN_INVALID_ADDRESS;
6403 }
6404 /* find the attenuated shadow of the underlying object */
6405 /* on our target map */
6406
6407 /* in English: the submap object may extend beyond the */
6408 /* region mapped by the entry, or may only fill a portion */
6409 /* of it. For our purposes, we only care if the object */
6410 /* doesn't fill it. In that case the area which will */
6411 /* ultimately be clipped in the top map only needs */
6412 /* to be as big as the portion of the underlying entry */
6413 /* which is mapped. */
6414 start_delta = submap_entry->vme_start > entry->offset ?
6415 submap_entry->vme_start - entry->offset : 0;
6416
6417 end_delta =
6418 (entry->offset + start_delta + (old_end - old_start)) <=
6419 submap_entry->vme_end ?
6420 0 : (entry->offset +
6421 (old_end - old_start))
6422 - submap_entry->vme_end;
6423
6424 old_start += start_delta;
6425 old_end -= end_delta;
6426
6427 if(submap_entry->is_sub_map) {
6428 entry = submap_entry;
6429 vaddr = local_vaddr;
6430 goto submap_recurse;
6431 }
6432
6433 if(((fault_type & VM_PROT_WRITE) && cow_sub_map_parent)) {
6434
6435 vm_object_t copy_object;
6436 vm_offset_t local_start;
6437 vm_offset_t local_end;
6438 boolean_t copied_slowly = FALSE;
6439
6440 if (vm_map_lock_read_to_write(map)) {
6441 vm_map_lock_read(map);
6442 old_start -= start_delta;
6443 old_end += end_delta;
6444 goto RetrySubMap;
6445 }
6446
6447
6448 if (submap_entry->object.vm_object == VM_OBJECT_NULL) {
6449 submap_entry->object.vm_object =
6450 vm_object_allocate(
6451 (vm_size_t)
6452 (submap_entry->vme_end
6453 - submap_entry->vme_start));
6454 submap_entry->offset = 0;
6455 }
6456 local_start = local_vaddr -
6457 (cow_parent_vaddr - old_start);
6458 local_end = local_vaddr +
6459 (old_end - cow_parent_vaddr);
6460 vm_map_clip_start(map, submap_entry, local_start);
6461 vm_map_clip_end(map, submap_entry, local_end);
6462
6463 /* This is the COW case, let's connect */
6464 /* an entry in our space to the underlying */
6465 /* object in the submap, bypassing the */
6466 /* submap. */
6467
6468
6469 if(submap_entry->wired_count != 0) {
6470 vm_object_lock(
6471 submap_entry->object.vm_object);
6472 vm_object_copy_slowly(
6473 submap_entry->object.vm_object,
6474 submap_entry->offset,
6475 submap_entry->vme_end -
6476 submap_entry->vme_start,
6477 FALSE,
6478 &copy_object);
6479 copied_slowly = TRUE;
6480 } else {
6481
6482 /* set up shadow object */
6483 copy_object = submap_entry->object.vm_object;
6484 vm_object_reference(copy_object);
6485 submap_entry->object.vm_object->shadowed = TRUE;
6486 submap_entry->needs_copy = TRUE;
6487 vm_object_pmap_protect(
6488 submap_entry->object.vm_object,
6489 submap_entry->offset,
6490 submap_entry->vme_end -
6491 submap_entry->vme_start,
6492 submap_entry->is_shared ?
6493 PMAP_NULL : map->pmap,
6494 submap_entry->vme_start,
6495 submap_entry->protection &
6496 ~VM_PROT_WRITE);
6497 }
6498
6499
6500 /* This works differently than the */
6501 /* normal submap case. We go back */
6502 /* to the parent of the cow map and */
6503 /* clip out the target portion of */
6504 /* the sub_map, substituting the */
6505 /* new copy object. */
6506
6507 vm_map_unlock(map);
6508 local_start = old_start;
6509 local_end = old_end;
6510 map = cow_sub_map_parent;
6511 *var_map = cow_sub_map_parent;
6512 vaddr = cow_parent_vaddr;
6513 cow_sub_map_parent = NULL;
6514
6515 if(!vm_map_lookup_entry(map,
6516 vaddr, &entry)) {
6517 vm_object_deallocate(
6518 copy_object);
6519 vm_map_lock_write_to_read(map);
6520 return KERN_INVALID_ADDRESS;
6521 }
6522
6523 /* clip out the portion of space */
6524 /* mapped by the sub map which */
6525 /* corresponds to the underlying */
6526 /* object */
6527 vm_map_clip_start(map, entry, local_start);
6528 vm_map_clip_end(map, entry, local_end);
6529
6530
6531 /* substitute copy object for */
6532 /* shared map entry */
6533 vm_map_deallocate(entry->object.sub_map);
6534 entry->is_sub_map = FALSE;
6535 entry->object.vm_object = copy_object;
6536
6537 entry->protection |= VM_PROT_WRITE;
6538 entry->max_protection |= VM_PROT_WRITE;
6539 if(copied_slowly) {
6540 entry->offset = 0;
6541 entry->needs_copy = FALSE;
6542 entry->is_shared = FALSE;
6543 } else {
6544 entry->offset = submap_entry->offset;
6545 entry->needs_copy = TRUE;
6546 if(entry->inheritance == VM_INHERIT_SHARE)
6547 entry->inheritance = VM_INHERIT_COPY;
6548 if (map != old_map)
6549 entry->is_shared = TRUE;
6550 }
6551 if(entry->inheritance == VM_INHERIT_SHARE)
6552 entry->inheritance = VM_INHERIT_COPY;
6553
6554 vm_map_lock_write_to_read(map);
6555 } else {
6556 if((cow_sub_map_parent)
6557 && (cow_sub_map_parent != *pmap_map)
6558 && (cow_sub_map_parent != map)) {
6559 vm_map_unlock(cow_sub_map_parent);
6560 }
6561 entry = submap_entry;
6562 vaddr = local_vaddr;
6563 }
6564 }
6565
6566 /*
6567 * Check whether this task is allowed to have
6568 * this page.
6569 */
6570
6571 prot = entry->protection;
6572 if ((fault_type & (prot)) != fault_type) {
6573 if (*pmap_map != map) {
6574 vm_map_unlock(*pmap_map);
6575 }
6576 *pmap_map = map;
6577 return KERN_PROTECTION_FAILURE;
6578 }
6579
6580 /*
6581 * If this page is not pageable, we have to get
6582 * it for all possible accesses.
6583 */
6584
6585 if (*wired = (entry->wired_count != 0))
6586 prot = fault_type = entry->protection;
6587
6588 /*
6589 * If the entry was copy-on-write, we either ...
6590 */
6591
6592 if (entry->needs_copy) {
6593 /*
6594 * If we want to write the page, we may as well
6595 * handle that now since we've got the map locked.
6596 *
6597 * If we don't need to write the page, we just
6598 * demote the permissions allowed.
6599 */
6600
6601 if (fault_type & VM_PROT_WRITE || *wired) {
6602 /*
6603 * Make a new object, and place it in the
6604 * object chain. Note that no new references
6605 * have appeared -- one just moved from the
6606 * map to the new object.
6607 */
6608
6609 if (vm_map_lock_read_to_write(map)) {
6610 vm_map_lock_read(map);
6611 goto RetryLookup;
6612 }
6613 vm_object_shadow(&entry->object.vm_object,
6614 &entry->offset,
6615 (vm_size_t) (entry->vme_end -
6616 entry->vme_start));
6617
6618 entry->object.vm_object->shadowed = TRUE;
6619 entry->needs_copy = FALSE;
6620 vm_map_lock_write_to_read(map);
6621 }
6622 else {
6623 /*
6624 * We're attempting to read a copy-on-write
6625 * page -- don't allow writes.
6626 */
6627
6628 prot &= (~VM_PROT_WRITE);
6629 }
6630 }
6631
6632 /*
6633 * Create an object if necessary.
6634 */
6635 if (entry->object.vm_object == VM_OBJECT_NULL) {
6636
6637 if (vm_map_lock_read_to_write(map)) {
6638 vm_map_lock_read(map);
6639 goto RetryLookup;
6640 }
6641
6642 entry->object.vm_object = vm_object_allocate(
6643 (vm_size_t)(entry->vme_end - entry->vme_start));
6644 entry->offset = 0;
6645 vm_map_lock_write_to_read(map);
6646 }
6647
6648 /*
6649 * Return the object/offset from this entry. If the entry
6650 * was copy-on-write or empty, it has been fixed up. Also
6651 * return the protection.
6652 */
6653
6654 *offset = (vaddr - entry->vme_start) + entry->offset;
6655 *object = entry->object.vm_object;
6656 *out_prot = prot;
6657 *behavior = entry->behavior;
6658 *lo_offset = entry->offset;
6659 *hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
6660
6661 /*
6662 * Lock the object to prevent it from disappearing
6663 */
6664
6665 vm_object_lock(*object);
6666
6667 /*
6668 * Save the version number
6669 */
6670
6671 out_version->main_timestamp = map->timestamp;
6672
6673 return KERN_SUCCESS;
6674}
6675
6676
6677/*
6678 * vm_map_verify:
6679 *
6680 * Verifies that the map in question has not changed
6681 * since the given version. If successful, the map
6682 * will not change until vm_map_verify_done() is called.
6683 */
6684boolean_t
6685vm_map_verify(
6686 register vm_map_t map,
6687 register vm_map_version_t *version) /* REF */
6688{
6689 boolean_t result;
6690
6691 vm_map_lock_read(map);
6692 result = (map->timestamp == version->main_timestamp);
6693
6694 if (!result)
6695 vm_map_unlock_read(map);
6696
6697 return(result);
6698}
6699
6700/*
6701 * vm_map_verify_done:
6702 *
6703 * Releases locks acquired by a vm_map_verify.
6704 *
6705 * This is now a macro in vm/vm_map.h. It does a
6706 * vm_map_unlock_read on the map.
6707 */
6708
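/*
 * Illustrative sketch (not part of this file): the usual pattern is to
 * capture the version during a lookup, drop the map lock while doing
 * slow work (e.g. paging), and then re-validate before trusting any
 * cached entry data.  On success the map is left read-locked, so the
 * caller finishes with vm_map_verify_done().
 *
 *	if (!vm_map_verify(map, &version)) {
 *		(the map changed underneath us; redo the lookup and
 *		 refresh "version" before continuing)
 *	}
 *	(safe to use the previously looked-up entry data here)
 *	vm_map_verify_done(map, &version);
 */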
6709
6710/*
6711 * vm_region:
6712 *
6713 * User call to obtain information about a region in
6714 * a task's address map. Currently, only one flavor is
6715 * supported.
6716 *
6717 * XXX The reserved and behavior fields cannot be filled
6718 * in until the vm merge from the IK is completed, and
6719 * vm_reserve is implemented.
6720 *
6721 * XXX Dependency: syscall_vm_region() also supports only one flavor.
6722 */
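/*
 * Illustrative sketch (not part of this file): querying the basic
 * attributes of the region containing "addr".  The variable names are
 * hypothetical; the flavor/count handshake matches the checks below.
 *
 *	vm_region_basic_info_data_t	info;
 *	mach_msg_type_number_t		count = VM_REGION_BASIC_INFO_COUNT;
 *	vm_offset_t			addr = lookup_address;
 *	vm_size_t			size;
 *	ipc_port_t			name;
 *	kern_return_t			kr;
 *
 *	kr = vm_region(map, &addr, &size, VM_REGION_BASIC_INFO,
 *		(vm_region_info_t)&info, &count, &name);
 *	(on success, addr and size describe the containing region and
 *	 info holds its protection, inheritance, sharing, etc.)
 */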
6723
6724kern_return_t
6725vm_region(
6726 vm_map_t map,
6727 vm_offset_t *address, /* IN/OUT */
6728 vm_size_t *size, /* OUT */
6729 vm_region_flavor_t flavor, /* IN */
6730 vm_region_info_t info, /* OUT */
6731 mach_msg_type_number_t *count, /* IN/OUT */
6732 ipc_port_t *object_name) /* OUT */
6733{
6734 vm_map_entry_t tmp_entry;
6735 register
6736 vm_map_entry_t entry;
6737 register
6738 vm_offset_t start;
6739 vm_region_basic_info_t basic;
6740 vm_region_extended_info_t extended;
6741 vm_region_top_info_t top;
6742
6743 if (map == VM_MAP_NULL)
6744 return(KERN_INVALID_ARGUMENT);
6745
6746 switch (flavor) {
6747
6748 case VM_REGION_BASIC_INFO:
6749 {
6750 if (*count < VM_REGION_BASIC_INFO_COUNT)
6751 return(KERN_INVALID_ARGUMENT);
6752
6753 basic = (vm_region_basic_info_t) info;
6754 *count = VM_REGION_BASIC_INFO_COUNT;
6755
6756 vm_map_lock_read(map);
6757
6758 start = *address;
6759 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
6760 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
6761 vm_map_unlock_read(map);
6762 return(KERN_INVALID_ADDRESS);
6763 }
6764 } else {
6765 entry = tmp_entry;
6766 }
6767
6768 start = entry->vme_start;
6769
6770 basic->offset = entry->offset;
6771 basic->protection = entry->protection;
6772 basic->inheritance = entry->inheritance;
6773 basic->max_protection = entry->max_protection;
6774 basic->behavior = entry->behavior;
6775 basic->user_wired_count = entry->user_wired_count;
6776 basic->reserved = entry->is_sub_map;
6777 *address = start;
6778 *size = (entry->vme_end - start);
6779
6780 if (object_name) *object_name = IP_NULL;
6781 if (entry->is_sub_map) {
6782 basic->shared = FALSE;
6783 } else {
6784 basic->shared = entry->is_shared;
6785 }
6786
6787 vm_map_unlock_read(map);
6788 return(KERN_SUCCESS);
6789 }
6790 case VM_REGION_EXTENDED_INFO:
6791 {
6792
6793 if (*count < VM_REGION_EXTENDED_INFO_COUNT)
6794 return(KERN_INVALID_ARGUMENT);
6795
6796 extended = (vm_region_extended_info_t) info;
6797 *count = VM_REGION_EXTENDED_INFO_COUNT;
6798
6799 vm_map_lock_read(map);
6800
6801 start = *address;
6802 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
6803 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
6804 vm_map_unlock_read(map);
6805 return(KERN_INVALID_ADDRESS);
6806 }
6807 } else {
6808 entry = tmp_entry;
6809 }
6810 start = entry->vme_start;
6811
6812 extended->protection = entry->protection;
6813 extended->user_tag = entry->alias;
6814 extended->pages_resident = 0;
6815 extended->pages_swapped_out = 0;
6816 extended->pages_shared_now_private = 0;
6817 extended->pages_dirtied = 0;
6818 extended->external_pager = 0;
6819 extended->shadow_depth = 0;
6820
6821 vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);
6822
6823 if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
6824 extended->share_mode = SM_PRIVATE;
6825
6826 if (object_name)
6827 *object_name = IP_NULL;
6828 *address = start;
6829 *size = (entry->vme_end - start);
6830
6831 vm_map_unlock_read(map);
6832 return(KERN_SUCCESS);
6833 }
6834 case VM_REGION_TOP_INFO:
6835 {
6836
6837 if (*count < VM_REGION_TOP_INFO_COUNT)
6838 return(KERN_INVALID_ARGUMENT);
6839
6840 top = (vm_region_top_info_t) info;
6841 *count = VM_REGION_TOP_INFO_COUNT;
6842
6843 vm_map_lock_read(map);
6844
6845 start = *address;
6846 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
6847 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
6848 vm_map_unlock_read(map);
6849 return(KERN_INVALID_ADDRESS);
6850 }
6851 } else {
6852 entry = tmp_entry;
6853
6854 }
6855 start = entry->vme_start;
6856
6857 top->private_pages_resident = 0;
6858 top->shared_pages_resident = 0;
6859
6860 vm_region_top_walk(entry, top);
6861
6862 if (object_name)
6863 *object_name = IP_NULL;
6864 *address = start;
6865 *size = (entry->vme_end - start);
6866
6867 vm_map_unlock_read(map);
6868 return(KERN_SUCCESS);
6869 }
6870 default:
6871 return(KERN_INVALID_ARGUMENT);
6872 }
6873}
6874
6875/*
6876 * vm_region_recurse: A form of vm_region which follows the
6877 * submaps in a target map
6878 *
6879 */
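/*
 * Illustrative sketch (not part of this file): the caller passes in the
 * maximum submap depth it is willing to descend; on return the value
 * reflects how deep the walk actually went.  Names are hypothetical.
 *
 *	vm_region_submap_info_data_t	info;
 *	mach_msg_type_number_t		count = VM_REGION_SUBMAP_INFO_COUNT;
 *	natural_t			depth = 1;
 *	vm_offset_t			addr = lookup_address;
 *	vm_size_t			size;
 *	kern_return_t			kr;
 *
 *	kr = vm_region_recurse(map, &addr, &size, &depth,
 *		(vm_region_recurse_info_t)&info, &count);
 */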
6880
6881kern_return_t
6882vm_region_recurse(
6883 vm_map_t map,
6884 vm_offset_t *address, /* IN/OUT */
6885 vm_size_t *size, /* OUT */
6886 natural_t *nesting_depth, /* IN/OUT */
6887 vm_region_recurse_info_t info, /* IN/OUT */
6888 mach_msg_type_number_t *count) /* IN/OUT */
6889{
6890 vm_map_entry_t tmp_entry;
6891 register
6892 vm_map_entry_t entry;
6893 register
6894 vm_offset_t start;
6895
6896 unsigned int recurse_count;
6897 vm_map_t submap;
6898 vm_map_t base_map;
6899 vm_map_entry_t base_entry;
6900 vm_offset_t base_next;
6901 vm_offset_t base_addr;
6902 vm_offset_t baddr_start_delta;
6903 vm_region_submap_info_t submap_info;
6904 vm_region_extended_info_data_t extended;
6905
6906 if (map == VM_MAP_NULL)
6907 return(KERN_INVALID_ARGUMENT);
6908
6909 submap_info = (vm_region_submap_info_t) info;
6910 *count = VM_REGION_SUBMAP_INFO_COUNT;
6911
6912 if (*count < VM_REGION_SUBMAP_INFO_COUNT)
6913 return(KERN_INVALID_ARGUMENT);
6914
6915 start = *address;
6916 base_map = map;
6917 recurse_count = *nesting_depth;
6918
6919LOOKUP_NEXT_BASE_ENTRY:
6920 vm_map_lock_read(map);
6921 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
6922 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
6923 vm_map_unlock_read(map);
6924 return(KERN_INVALID_ADDRESS);
6925 }
6926 } else {
6927 entry = tmp_entry;
6928 }
6929 *size = entry->vme_end - entry->vme_start;
6930 start = entry->vme_start;
6931 base_addr = start;
6932 baddr_start_delta = *address - start;
6933 base_next = entry->vme_end;
6934 base_entry = entry;
6935
6936 while(entry->is_sub_map && recurse_count) {
6937 recurse_count--;
6938 vm_map_lock_read(entry->object.sub_map);
6939
6940
6941 if(entry == base_entry) {
6942 start = entry->offset;
6943 start += *address - entry->vme_start;
6944 }
6945
6946 submap = entry->object.sub_map;
6947 vm_map_unlock_read(map);
6948 map = submap;
6949
6950 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
6951 if ((entry = tmp_entry->vme_next)
6952 == vm_map_to_entry(map)) {
6953 vm_map_unlock_read(map);
6954 map = base_map;
6955 start = base_next;
6956 recurse_count = 0;
6957 *nesting_depth = 0;
6958 goto LOOKUP_NEXT_BASE_ENTRY;
6959 }
6960 } else {
6961 entry = tmp_entry;
6962
6963 }
6964 if(start <= entry->vme_start) {
6965 vm_offset_t old_start = start;
6966 if(baddr_start_delta) {
6967 base_addr += (baddr_start_delta);
6968 *size -= baddr_start_delta;
6969 baddr_start_delta = 0;
6970 }
6971 if(base_next <=
6972 (base_addr += (entry->vme_start - start))) {
6973 vm_map_unlock_read(map);
6974 map = base_map;
6975 start = base_next;
6976 recurse_count = 0;
6977 *nesting_depth = 0;
6978 goto LOOKUP_NEXT_BASE_ENTRY;
6979 }
6980 *size -= entry->vme_start - start;
6981 if (*size > (entry->vme_end - entry->vme_start)) {
6982 *size = entry->vme_end - entry->vme_start;
6983 }
6984 start = 0;
6985 } else {
6986 if(baddr_start_delta) {
6987 if((start - entry->vme_start)
6988 < baddr_start_delta) {
6989 base_addr += start - entry->vme_start;
6990 *size -= start - entry->vme_start;
6991 } else {
6992 base_addr += baddr_start_delta;
6993 *size += baddr_start_delta;
6994 }
6995 baddr_start_delta = 0;
6996 }
6997 base_addr += entry->vme_start;
6998 if(base_addr >= base_next) {
6999 vm_map_unlock_read(map);
7000 map = base_map;
7001 start = base_next;
7002 recurse_count = 0;
7003 *nesting_depth = 0;
7004 goto LOOKUP_NEXT_BASE_ENTRY;
7005 }
7006 if (*size > (entry->vme_end - start))
7007 *size = entry->vme_end - start;
7008
7009 start = entry->vme_start - start;
7010 }
7011
7012 start += entry->offset;
7013
7014 }
7015 *nesting_depth -= recurse_count;
7016 if(entry != base_entry) {
7017 start = entry->vme_start + (start - entry->offset);
7018 }
7019
7020
7021 submap_info->user_tag = entry->alias;
7022 submap_info->offset = entry->offset;
7023 submap_info->protection = entry->protection;
7024 submap_info->inheritance = entry->inheritance;
7025 submap_info->max_protection = entry->max_protection;
7026 submap_info->behavior = entry->behavior;
7027 submap_info->user_wired_count = entry->user_wired_count;
7028 submap_info->is_submap = entry->is_sub_map;
7029 submap_info->object_id = (vm_offset_t)entry->object.vm_object;
7030 *address = base_addr;
7031
7032
7033 extended.pages_resident = 0;
7034 extended.pages_swapped_out = 0;
7035 extended.pages_shared_now_private = 0;
7036 extended.pages_dirtied = 0;
7037 extended.external_pager = 0;
7038 extended.shadow_depth = 0;
7039
7040 if(!entry->is_sub_map) {
7041 vm_region_walk(entry, &extended, entry->offset,
7042 entry->vme_end - start, map, start);
7043 submap_info->share_mode = extended.share_mode;
7044 if (extended.external_pager && extended.ref_count == 2
7045 && extended.share_mode == SM_SHARED)
7046 submap_info->share_mode = SM_PRIVATE;
7047 submap_info->ref_count = extended.ref_count;
7048 } else {
7049 if(entry->use_pmap)
7050 submap_info->share_mode = SM_TRUESHARED;
7051 else
7052 submap_info->share_mode = SM_PRIVATE;
7053 submap_info->ref_count = entry->object.sub_map->ref_count;
7054 }
7055
7056 submap_info->pages_resident = extended.pages_resident;
7057 submap_info->pages_swapped_out = extended.pages_swapped_out;
7058 submap_info->pages_shared_now_private =
7059 extended.pages_shared_now_private;
7060 submap_info->pages_dirtied = extended.pages_dirtied;
7061 submap_info->external_pager = extended.external_pager;
7062 submap_info->shadow_depth = extended.shadow_depth;
7063
7064 vm_map_unlock_read(map);
7065 return(KERN_SUCCESS);
7066}
7067
7068/*
7069 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
7070 * Goes away after regular vm_region_recurse function migrates to
7071 * 64 bits
7072 * vm_region_recurse: A form of vm_region which follows the
7073 * submaps in a target map
7074 *
7075 */
7076
7077kern_return_t
7078vm_region_recurse_64(
7079 vm_map_t map,
7080 vm_offset_t *address, /* IN/OUT */
7081 vm_size_t *size, /* OUT */
7082 natural_t *nesting_depth, /* IN/OUT */
7083 vm_region_recurse_info_t info, /* IN/OUT */
7084 mach_msg_type_number_t *count) /* IN/OUT */
7085{
7086 vm_map_entry_t tmp_entry;
7087 register
7088 vm_map_entry_t entry;
7089 register
7090 vm_offset_t start;
7091
7092 unsigned int recurse_count;
7093 vm_map_t submap;
7094 vm_map_t base_map;
7095 vm_map_entry_t base_entry;
7096 vm_offset_t base_next;
7097 vm_offset_t base_addr;
7098 vm_offset_t baddr_start_delta;
7099 vm_region_submap_info_64_t submap_info;
7100 vm_region_extended_info_data_t extended;
7101
7102 if (map == VM_MAP_NULL)
7103 return(KERN_INVALID_ARGUMENT);
7104
7105 submap_info = (vm_region_submap_info_64_t) info;
7106 *count = VM_REGION_SUBMAP_INFO_COUNT;
7107
7108 if (*count < VM_REGION_SUBMAP_INFO_COUNT)
7109 return(KERN_INVALID_ARGUMENT);
7110
7111 start = *address;
7112 base_map = map;
7113 recurse_count = *nesting_depth;
7114
7115LOOKUP_NEXT_BASE_ENTRY:
7116 vm_map_lock_read(map);
7117 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
7118 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
7119 vm_map_unlock_read(map);
7120 return(KERN_INVALID_ADDRESS);
7121 }
7122 } else {
7123 entry = tmp_entry;
7124 }
7125 *size = entry->vme_end - entry->vme_start;
7126 start = entry->vme_start;
7127 base_addr = start;
7128 baddr_start_delta = *address - start;
7129 base_next = entry->vme_end;
7130 base_entry = entry;
7131
7132 while(entry->is_sub_map && recurse_count) {
7133 recurse_count--;
7134 vm_map_lock_read(entry->object.sub_map);
7135
7136
7137 if(entry == base_entry) {
7138 start = entry->offset;
7139 start += *address - entry->vme_start;
7140 }
7141
7142 submap = entry->object.sub_map;
7143 vm_map_unlock_read(map);
7144 map = submap;
7145
7146 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
7147 if ((entry = tmp_entry->vme_next)
7148 == vm_map_to_entry(map)) {
7149 vm_map_unlock_read(map);
7150 map = base_map;
7151 start = base_next;
7152 recurse_count = 0;
7153 *nesting_depth = 0;
7154 goto LOOKUP_NEXT_BASE_ENTRY;
7155 }
7156 } else {
7157 entry = tmp_entry;
7158
7159 }
7160 if(start <= entry->vme_start) {
7161 vm_offset_t old_start = start;
7162 if(baddr_start_delta) {
7163 base_addr += (baddr_start_delta);
7164 *size -= baddr_start_delta;
7165 baddr_start_delta = 0;
7166 }
7167 if(base_next <=
7168 (base_addr += (entry->vme_start - start))) {
7169 vm_map_unlock_read(map);
7170 map = base_map;
7171 start = base_next;
7172 recurse_count = 0;
7173 *nesting_depth = 0;
7174 goto LOOKUP_NEXT_BASE_ENTRY;
7175 }
7176 *size -= entry->vme_start - start;
7177 if (*size > (entry->vme_end - entry->vme_start)) {
7178 *size = entry->vme_end - entry->vme_start;
7179 }
7180 start = 0;
7181 } else {
7182 if(baddr_start_delta) {
7183 if((start - entry->vme_start)
7184 < baddr_start_delta) {
7185 base_addr += start - entry->vme_start;
7186 *size -= start - entry->vme_start;
7187 } else {
7188 base_addr += baddr_start_delta;
7189 *size += baddr_start_delta;
7190 }
7191 baddr_start_delta = 0;
7192 }
7193 base_addr += entry->vme_start;
7194 if(base_addr >= base_next) {
7195 vm_map_unlock_read(map);
7196 map = base_map;
7197 start = base_next;
7198 recurse_count = 0;
7199 *nesting_depth = 0;
7200 goto LOOKUP_NEXT_BASE_ENTRY;
7201 }
7202 if (*size > (entry->vme_end - start))
7203 *size = entry->vme_end - start;
7204
7205 start = entry->vme_start - start;
7206 }
7207
7208 start += entry->offset;
7209
7210 }
7211 *nesting_depth -= recurse_count;
7212 if(entry != base_entry) {
7213 start = entry->vme_start + (start - entry->offset);
7214 }
7215
7216
7217 submap_info->user_tag = entry->alias;
7218 submap_info->offset = entry->offset;
7219 submap_info->protection = entry->protection;
7220 submap_info->inheritance = entry->inheritance;
7221 submap_info->max_protection = entry->max_protection;
7222 submap_info->behavior = entry->behavior;
7223 submap_info->user_wired_count = entry->user_wired_count;
7224 submap_info->is_submap = entry->is_sub_map;
7225 submap_info->object_id = (vm_offset_t)entry->object.vm_object;
7226 *address = base_addr;
7227
7228
7229 extended.pages_resident = 0;
7230 extended.pages_swapped_out = 0;
7231 extended.pages_shared_now_private = 0;
7232 extended.pages_dirtied = 0;
7233 extended.external_pager = 0;
7234 extended.shadow_depth = 0;
7235
7236 if(!entry->is_sub_map) {
7237 vm_region_walk(entry, &extended, entry->offset,
7238 entry->vme_end - start, map, start);
7239 submap_info->share_mode = extended.share_mode;
7240 if (extended.external_pager && extended.ref_count == 2
7241 && extended.share_mode == SM_SHARED)
7242 submap_info->share_mode = SM_PRIVATE;
7243 submap_info->ref_count = extended.ref_count;
7244 } else {
7245 if(entry->use_pmap)
7246 submap_info->share_mode = SM_TRUESHARED;
7247 else
7248 submap_info->share_mode = SM_PRIVATE;
7249 submap_info->ref_count = entry->object.sub_map->ref_count;
7250 }
7251
7252 submap_info->pages_resident = extended.pages_resident;
7253 submap_info->pages_swapped_out = extended.pages_swapped_out;
7254 submap_info->pages_shared_now_private =
7255 extended.pages_shared_now_private;
7256 submap_info->pages_dirtied = extended.pages_dirtied;
7257 submap_info->external_pager = extended.external_pager;
7258 submap_info->shadow_depth = extended.shadow_depth;
7259
7260 vm_map_unlock_read(map);
7261 return(KERN_SUCCESS);
7262}
7263
7264
7265/*
7266 * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
7267 * Goes away after regular vm_region function migrates to
7268 * 64 bits
7269 */
7270
7271
7272kern_return_t
7273vm_region_64(
7274 vm_map_t map,
7275 vm_offset_t *address, /* IN/OUT */
7276 vm_size_t *size, /* OUT */
7277 vm_region_flavor_t flavor, /* IN */
7278 vm_region_info_t info, /* OUT */
7279 mach_msg_type_number_t *count, /* IN/OUT */
7280 ipc_port_t *object_name) /* OUT */
7281{
7282 vm_map_entry_t tmp_entry;
7283 register
7284 vm_map_entry_t entry;
7285 register
7286 vm_offset_t start;
7287 vm_region_basic_info_64_t basic;
7288 vm_region_extended_info_t extended;
7289 vm_region_top_info_t top;
7290
7291 if (map == VM_MAP_NULL)
7292 return(KERN_INVALID_ARGUMENT);
7293
7294 switch (flavor) {
7295
7296 case VM_REGION_BASIC_INFO:
7297 {
7298 if (*count < VM_REGION_BASIC_INFO_COUNT)
7299 return(KERN_INVALID_ARGUMENT);
7300
7301 basic = (vm_region_basic_info_64_t) info;
7302 *count = VM_REGION_BASIC_INFO_COUNT;
7303
7304 vm_map_lock_read(map);
7305
7306 start = *address;
7307 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
7308 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
7309 vm_map_unlock_read(map);
7310 return(KERN_INVALID_ADDRESS);
7311 }
7312 } else {
7313 entry = tmp_entry;
7314 }
7315
7316 start = entry->vme_start;
7317
7318 basic->offset = entry->offset;
7319 basic->protection = entry->protection;
7320 basic->inheritance = entry->inheritance;
7321 basic->max_protection = entry->max_protection;
7322 basic->behavior = entry->behavior;
7323 basic->user_wired_count = entry->user_wired_count;
7324 basic->reserved = entry->is_sub_map;
7325 *address = start;
7326 *size = (entry->vme_end - start);
7327
7328 if (object_name) *object_name = IP_NULL;
7329 if (entry->is_sub_map) {
7330 basic->shared = FALSE;
7331 } else {
7332 basic->shared = entry->is_shared;
7333 }
7334
7335 vm_map_unlock_read(map);
7336 return(KERN_SUCCESS);
7337 }
7338 case VM_REGION_EXTENDED_INFO:
7339 {
7340
7341 if (*count < VM_REGION_EXTENDED_INFO_COUNT)
7342 return(KERN_INVALID_ARGUMENT);
7343
7344 extended = (vm_region_extended_info_t) info;
7345 *count = VM_REGION_EXTENDED_INFO_COUNT;
7346
7347 vm_map_lock_read(map);
7348
7349 start = *address;
7350 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
7351 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
7352 vm_map_unlock_read(map);
7353 return(KERN_INVALID_ADDRESS);
7354 }
7355 } else {
7356 entry = tmp_entry;
7357 }
7358 start = entry->vme_start;
7359
7360 extended->protection = entry->protection;
7361 extended->user_tag = entry->alias;
7362 extended->pages_resident = 0;
7363 extended->pages_swapped_out = 0;
7364 extended->pages_shared_now_private = 0;
7365 extended->pages_dirtied = 0;
7366 extended->external_pager = 0;
7367 extended->shadow_depth = 0;
7368
7369 vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);
7370
7371 if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
7372 extended->share_mode = SM_PRIVATE;
7373
7374 if (object_name)
7375 *object_name = IP_NULL;
7376 *address = start;
7377 *size = (entry->vme_end - start);
7378
7379 vm_map_unlock_read(map);
7380 return(KERN_SUCCESS);
7381 }
7382 case VM_REGION_TOP_INFO:
7383 {
7384
7385 if (*count < VM_REGION_TOP_INFO_COUNT)
7386 return(KERN_INVALID_ARGUMENT);
7387
7388 top = (vm_region_top_info_t) info;
7389 *count = VM_REGION_TOP_INFO_COUNT;
7390
7391 vm_map_lock_read(map);
7392
7393 start = *address;
7394 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
7395 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
7396 vm_map_unlock_read(map);
7397 return(KERN_INVALID_ADDRESS);
7398 }
7399 } else {
7400 entry = tmp_entry;
7401
7402 }
7403 start = entry->vme_start;
7404
7405 top->private_pages_resident = 0;
7406 top->shared_pages_resident = 0;
7407
7408 vm_region_top_walk(entry, top);
7409
7410 if (object_name)
7411 *object_name = IP_NULL;
7412 *address = start;
7413 *size = (entry->vme_end - start);
7414
7415 vm_map_unlock_read(map);
7416 return(KERN_SUCCESS);
7417 }
7418 default:
7419 return(KERN_INVALID_ARGUMENT);
7420 }
7421}
7422
7423void
7424vm_region_top_walk(
7425 vm_map_entry_t entry,
7426 vm_region_top_info_t top)
7427{
7428 register struct vm_object *obj, *tmp_obj;
7429 register int ref_count;
7430
7431 if (entry->object.vm_object == 0) {
7432 top->share_mode = SM_EMPTY;
7433 top->ref_count = 0;
7434 top->obj_id = 0;
7435 return;
7436 }
7437 if (entry->is_sub_map)
7438 vm_region_top_walk((vm_map_entry_t)entry->object.sub_map, top);
7439 else {
7440 obj = entry->object.vm_object;
7441
7442 vm_object_lock(obj);
7443
7444 if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
7445 ref_count--;
7446
7447 if (obj->shadow) {
7448 if (ref_count == 1)
7449 top->private_pages_resident = obj->resident_page_count;
7450 else
7451 top->shared_pages_resident = obj->resident_page_count;
7452 top->ref_count = ref_count;
7453 top->share_mode = SM_COW;
7454
7455 while (tmp_obj = obj->shadow) {
7456 vm_object_lock(tmp_obj);
7457 vm_object_unlock(obj);
7458 obj = tmp_obj;
7459
7460 if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
7461 ref_count--;
7462
7463 top->shared_pages_resident += obj->resident_page_count;
7464 top->ref_count += ref_count - 1;
7465 }
7466 } else {
7467 if (entry->needs_copy) {
7468 top->share_mode = SM_COW;
7469 top->shared_pages_resident = obj->resident_page_count;
7470 } else {
7471 if (ref_count == 1 ||
7472 (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
7473 top->share_mode = SM_PRIVATE;
7474 top->private_pages_resident = obj->resident_page_count;
7475 } else {
7476 top->share_mode = SM_SHARED;
7477 top->shared_pages_resident = obj->resident_page_count;
7478 }
7479 }
7480 top->ref_count = ref_count;
7481 }
7482 top->obj_id = (int)obj;
7483
7484 vm_object_unlock(obj);
7485 }
7486}
7487
7488void
7489vm_region_walk(
7490 vm_map_entry_t entry,
7491 vm_region_extended_info_t extended,
7492 vm_object_offset_t offset,
7493 vm_offset_t range,
7494 vm_map_t map,
7495 vm_offset_t va)
7496{
7497 register struct vm_object *obj, *tmp_obj;
7498 register vm_offset_t last_offset;
7499 register int i;
7500 register int ref_count;
7501 void vm_region_look_for_page();
7502
7503 if ((entry->object.vm_object == 0) ||
7504 (entry->object.vm_object->phys_contiguous)) {
7505 extended->share_mode = SM_EMPTY;
7506 extended->ref_count = 0;
7507 return;
7508 }
7509 if (entry->is_sub_map)
7510 vm_region_walk((vm_map_entry_t)entry->object.sub_map, extended, offset + entry->offset,
7511 range, map, va);
7512 else {
7513 obj = entry->object.vm_object;
7514
7515 vm_object_lock(obj);
7516
7517 if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
7518 ref_count--;
7519
7520 for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
7521 vm_region_look_for_page(obj, extended, offset, ref_count, 0, map, va);
7522
7523 if (extended->shadow_depth || entry->needs_copy)
7524 extended->share_mode = SM_COW;
7525 else {
7526 if (ref_count == 1)
7527 extended->share_mode = SM_PRIVATE;
7528 else {
7529 if (obj->true_share)
7530 extended->share_mode = SM_TRUESHARED;
7531 else
7532 extended->share_mode = SM_SHARED;
7533 }
7534 }
7535 extended->ref_count = ref_count - extended->shadow_depth;
7536
7537 for (i = 0; i < extended->shadow_depth; i++) {
7538 if ((tmp_obj = obj->shadow) == 0)
7539 break;
7540 vm_object_lock(tmp_obj);
7541 vm_object_unlock(obj);
7542
7543 if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
7544 ref_count--;
7545
7546 extended->ref_count += ref_count;
7547 obj = tmp_obj;
7548 }
7549 vm_object_unlock(obj);
7550
7551 if (extended->share_mode == SM_SHARED) {
7552 register vm_map_entry_t cur;
7553 register vm_map_entry_t last;
7554 int my_refs;
7555
7556 obj = entry->object.vm_object;
7557 last = vm_map_to_entry(map);
7558 my_refs = 0;
7559
7560 if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
7561 ref_count--;
7562 for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
7563 my_refs += vm_region_count_obj_refs(cur, obj);
7564
7565 if (my_refs == ref_count)
7566 extended->share_mode = SM_PRIVATE_ALIASED;
7567 else if (my_refs > 1)
7568 extended->share_mode = SM_SHARED_ALIASED;
7569 }
7570 }
7571}
7572
7573
7574/* object is locked on entry and locked on return */
7575
7576
7577void
7578vm_region_look_for_page(
7579 vm_object_t object,
7580 vm_region_extended_info_t extended,
7581 vm_object_offset_t offset,
7582 int max_refcnt,
7583 int depth,
7584 vm_map_t map,
7585 vm_offset_t va)
7586{
7587 register vm_page_t p;
7588 register vm_object_t shadow;
7589 register int ref_count;
7590 vm_object_t caller_object;
7591
7592 shadow = object->shadow;
7593 caller_object = object;
7594
7595
7596 while (TRUE) {
7597
7598 if ( !(object->pager_trusted) && !(object->internal))
7599 extended->external_pager = 1;
7600
7601 if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
7602 if (shadow && (max_refcnt == 1))
7603 extended->pages_shared_now_private++;
7604
7605 if (p->dirty || pmap_is_modified(p->phys_addr))
7606 extended->pages_dirtied++;
7607 extended->pages_resident++;
7608
7609 if(object != caller_object)
7610 vm_object_unlock(object);
7611
7612 return;
7613 }
7614 if (object->existence_map) {
7615 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {
7616
7617 extended->pages_swapped_out++;
7618
7619 if(object != caller_object)
7620 vm_object_unlock(object);
7621
7622 return;
7623 }
7624 }
7625 if (shadow) {
7626 vm_object_lock(shadow);
7627
7628 if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
7629 ref_count--;
7630
7631 if (++depth > extended->shadow_depth)
7632 extended->shadow_depth = depth;
7633
7634 if (ref_count > max_refcnt)
7635 max_refcnt = ref_count;
7636
7637 if(object != caller_object)
7638 vm_object_unlock(object);
7639
7640 object = shadow;
7641 shadow = object->shadow;
7642 offset = offset + object->shadow_offset;
7643 continue;
7644 }
7645 if(object != caller_object)
7646 vm_object_unlock(object);
7647 break;
7648 }
7649}
7650
7651
7652 int vm_region_count_obj_refs(
7653 vm_map_entry_t entry,
7654 vm_object_t object)
7655{
7656 register int ref_count;
7657 register vm_object_t chk_obj;
7658 register vm_object_t tmp_obj;
7659
7660 if (entry->object.vm_object == 0)
7661 return(0);
7662
7663 if (entry->is_sub_map)
7664 ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
7665 else {
7666 ref_count = 0;
7667
7668 chk_obj = entry->object.vm_object;
7669 vm_object_lock(chk_obj);
7670
7671 while (chk_obj) {
7672 if (chk_obj == object)
7673 ref_count++;
7674 if (tmp_obj = chk_obj->shadow)
7675 vm_object_lock(tmp_obj);
7676 vm_object_unlock(chk_obj);
7677
7678 chk_obj = tmp_obj;
7679 }
7680 }
7681 return(ref_count);
7682}
7683
7684
7685/*
7686 * Routine: vm_map_simplify
7687 *
7688 * Description:
7689 * Attempt to simplify the map representation in
7690 * the vicinity of the given starting address.
7691 * Note:
7692 * This routine is intended primarily to keep the
7693 * kernel maps more compact -- they generally don't
7694 * benefit from the "expand a map entry" technology
7695 * at allocation time because the adjacent entry
7696 * is often wired down.
7697 */
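/*
 * Illustrative example (not from this file): two adjacent entries
 *
 *	[0x1000, 0x3000)  object A, offset 0x0
 *	[0x3000, 0x5000)  object A, offset 0x2000
 *
 * with identical protection, inheritance, wiring and sharing state can
 * be coalesced by vm_map_simplify(map, 0x3000) into a single entry
 * [0x1000, 0x5000) backed by object A at offset 0x0.
 */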
7698void
7699vm_map_simplify(
7700 vm_map_t map,
7701 vm_offset_t start)
7702{
7703 vm_map_entry_t this_entry;
7704 vm_map_entry_t prev_entry;
7705 vm_map_entry_t next_entry;
7706
7707 vm_map_lock(map);
7708 if (
7709 (vm_map_lookup_entry(map, start, &this_entry)) &&
7710 ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&
7711
7712 (prev_entry->vme_end == this_entry->vme_start) &&
7713
7714 (prev_entry->is_shared == FALSE) &&
7715 (prev_entry->is_sub_map == FALSE) &&
7716
7717 (this_entry->is_shared == FALSE) &&
7718 (this_entry->is_sub_map == FALSE) &&
7719
7720 (prev_entry->inheritance == this_entry->inheritance) &&
7721 (prev_entry->protection == this_entry->protection) &&
7722 (prev_entry->max_protection == this_entry->max_protection) &&
7723 (prev_entry->behavior == this_entry->behavior) &&
7724 (prev_entry->wired_count == this_entry->wired_count) &&
7725 (prev_entry->user_wired_count == this_entry->user_wired_count)&&
7726 (prev_entry->in_transition == FALSE) &&
7727 (this_entry->in_transition == FALSE) &&
7728
7729 (prev_entry->needs_copy == this_entry->needs_copy) &&
7730
7731 (prev_entry->object.vm_object == this_entry->object.vm_object)&&
7732 ((prev_entry->offset +
7733 (prev_entry->vme_end - prev_entry->vme_start))
7734 == this_entry->offset)
7735 ) {
7736 SAVE_HINT(map, prev_entry);
7737 vm_map_entry_unlink(map, this_entry);
7738 prev_entry->vme_end = this_entry->vme_end;
7739 UPDATE_FIRST_FREE(map, map->first_free);
7740 vm_object_deallocate(this_entry->object.vm_object);
7741 vm_map_entry_dispose(map, this_entry);
7742 counter(c_vm_map_simplified_lower++);
7743 }
7744 if (
7745 (vm_map_lookup_entry(map, start, &this_entry)) &&
7746 ((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) &&
7747
7748 (next_entry->vme_start == this_entry->vme_end) &&
7749
7750 (next_entry->is_shared == FALSE) &&
7751 (next_entry->is_sub_map == FALSE) &&
7752
7753 (this_entry->is_shared == FALSE) &&
7754 (this_entry->is_sub_map == FALSE) &&
7755
7756 (next_entry->inheritance == this_entry->inheritance) &&
7757 (next_entry->protection == this_entry->protection) &&
7758 (next_entry->max_protection == this_entry->max_protection) &&
7759 (next_entry->behavior == this_entry->behavior) &&
7760 (next_entry->wired_count == this_entry->wired_count) &&
7761 (next_entry->user_wired_count == this_entry->user_wired_count)&&
7762 (this_entry->in_transition == FALSE) &&
7763 (next_entry->in_transition == FALSE) &&
7764
7765 (next_entry->needs_copy == this_entry->needs_copy) &&
7766
7767 (next_entry->object.vm_object == this_entry->object.vm_object)&&
7768 ((this_entry->offset +
7769 (this_entry->vme_end - this_entry->vme_start))
7770 == next_entry->offset)
7771 ) {
7772 vm_map_entry_unlink(map, next_entry);
7773 this_entry->vme_end = next_entry->vme_end;
7774 UPDATE_FIRST_FREE(map, map->first_free);
7775 vm_object_deallocate(next_entry->object.vm_object);
7776 vm_map_entry_dispose(map, next_entry);
7777 counter(c_vm_map_simplified_upper++);
7778 }
7779 counter(c_vm_map_simplify_called++);
7780 vm_map_unlock(map);
7781}
7782
7783
7784/*
7785 * Routine: vm_map_machine_attribute
7786 * Purpose:
7787 * Provide machine-specific attributes to mappings,
7788 * such as cachability etc. for machines that provide
7789 * them. NUMA architectures and machines with big/strange
7790 * caches will use this.
7791 * Note:
7792 * Responsibilities for locking and checking are handled here,
7793 * everything else in the pmap module. If any non-volatile
7794 * information must be kept, the pmap module should handle
7795 * it itself. [This assumes that attributes do not
7796 * need to be inherited, which seems ok to me]
7797 */
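/*
 * Illustrative sketch (not part of this file), assuming the MATTR_CACHE
 * attribute and MATTR_VAL_CACHE_FLUSH value from <mach/vm_attributes.h>:
 * flushing the cache over a mapped range would look roughly like this.
 *
 *	vm_machine_attribute_val_t	val = MATTR_VAL_CACHE_FLUSH;
 *	kern_return_t			kr;
 *
 *	kr = vm_map_machine_attribute(map, addr, size, MATTR_CACHE, &val);
 */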
7798kern_return_t
7799vm_map_machine_attribute(
7800 vm_map_t map,
7801 vm_offset_t address,
7802 vm_size_t size,
7803 vm_machine_attribute_t attribute,
7804 vm_machine_attribute_val_t* value) /* IN/OUT */
7805{
7806 kern_return_t ret;
7807
7808 if (address < vm_map_min(map) ||
7809 (address + size) > vm_map_max(map))
7810 return KERN_INVALID_ADDRESS;
7811
7812 vm_map_lock(map);
7813
7814 ret = pmap_attribute(map->pmap, address, size, attribute, value);
7815
7816 vm_map_unlock(map);
7817
7818 return ret;
7819}
7820
7821/*
7822 * vm_map_behavior_set:
7823 *
7824 * Sets the paging reference behavior of the specified address
7825 * range in the target map. Paging reference behavior affects
7826 * how pagein operations resulting from faults on the map will be
7827 * clustered.
7828 */
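/*
 * Illustrative sketch (not part of this file): advising the VM system
 * that a mapped range will be touched front to back, so pageins can be
 * clustered accordingly.  "start" and "len" are hypothetical names.
 *
 *	kr = vm_map_behavior_set(map, start, start + len,
 *		VM_BEHAVIOR_SEQUENTIAL);
 */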
7829kern_return_t
7830vm_map_behavior_set(
7831 vm_map_t map,
7832 vm_offset_t start,
7833 vm_offset_t end,
7834 vm_behavior_t new_behavior)
7835{
7836 register vm_map_entry_t entry;
7837 vm_map_entry_t temp_entry;
7838
7839 XPR(XPR_VM_MAP,
7840 "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
7841 (integer_t)map, start, end, new_behavior, 0);
7842
7843 switch (new_behavior) {
7844 case VM_BEHAVIOR_DEFAULT:
7845 case VM_BEHAVIOR_RANDOM:
7846 case VM_BEHAVIOR_SEQUENTIAL:
7847 case VM_BEHAVIOR_RSEQNTL:
7848 break;
7849 default:
7850 return(KERN_INVALID_ARGUMENT);
7851 }
7852
7853 vm_map_lock(map);
7854
7855 /*
7856 * The entire address range must be valid for the map.
7857 * Note that vm_map_range_check() does a
7858 * vm_map_lookup_entry() internally and returns the
7859 * entry containing the start of the address range if
7860 * the entire range is valid.
7861 */
7862 if (vm_map_range_check(map, start, end, &temp_entry)) {
7863 entry = temp_entry;
7864 vm_map_clip_start(map, entry, start);
7865 }
7866 else {
7867 vm_map_unlock(map);
7868 return(KERN_INVALID_ADDRESS);
7869 }
7870
7871 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
7872 vm_map_clip_end(map, entry, end);
7873
7874 entry->behavior = new_behavior;
7875
7876 entry = entry->vme_next;
7877 }
7878
7879 vm_map_unlock(map);
7880 return(KERN_SUCCESS);
7881}
7882
7883
7884#include <mach_kdb.h>
7885#if MACH_KDB
7886#include <ddb/db_output.h>
7887#include <vm/vm_print.h>
7888
7889#define printf db_printf
7890
7891/*
7892 * Forward declarations for internal functions.
7893 */
7894extern void vm_map_links_print(
7895 struct vm_map_links *links);
7896
7897extern void vm_map_header_print(
7898 struct vm_map_header *header);
7899
7900extern void vm_map_entry_print(
7901 vm_map_entry_t entry);
7902
7903extern void vm_follow_entry(
7904 vm_map_entry_t entry);
7905
7906extern void vm_follow_map(
7907 vm_map_t map);
7908
7909/*
7910 * vm_map_links_print: [ debug ]
7911 */
7912void
7913vm_map_links_print(
7914 struct vm_map_links *links)
7915{
7916 iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
7917 links->prev,
7918 links->next,
7919 links->start,
7920 links->end);
7921}
7922
7923/*
7924 * vm_map_header_print: [ debug ]
7925 */
7926void
7927vm_map_header_print(
7928 struct vm_map_header *header)
7929{
7930 vm_map_links_print(&header->links);
7931 iprintf("nentries=0x%x, %sentries_pageable\n",
7932 header->nentries,
7933 (header->entries_pageable ? "" : "!"));
7934}
7935
7936/*
7937 * vm_follow_entry: [ debug ]
7938 */
7939void
7940vm_follow_entry(
7941 vm_map_entry_t entry)
7942{
7943 extern int db_indent;
7944 int shadows;
7945
7946 iprintf("map entry 0x%x:\n", entry);
7947
7948 db_indent += 2;
7949
7950 shadows = vm_follow_object(entry->object.vm_object);
7951 iprintf("Total objects : %d\n",shadows);
7952
7953 db_indent -= 2;
7954}
7955
7956/*
7957 * vm_map_entry_print: [ debug ]
7958 */
7959void
7960vm_map_entry_print(
7961 register vm_map_entry_t entry)
7962{
7963 extern int db_indent;
7964 static char *inheritance_name[4] = { "share", "copy", "none", "?"};
7965 static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };
7966
7967 iprintf("map entry 0x%x:\n", entry);
7968
7969 db_indent += 2;
7970
7971 vm_map_links_print(&entry->links);
7972
7973 iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
7974 entry->vme_start,
7975 entry->vme_end,
7976 entry->protection,
7977 entry->max_protection,
7978 inheritance_name[(entry->inheritance & 0x3)]);
7979
7980 iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
7981 behavior_name[(entry->behavior & 0x3)],
7982 entry->wired_count,
7983 entry->user_wired_count);
7984 iprintf("%sin_transition, %sneeds_wakeup\n",
7985 (entry->in_transition ? "" : "!"),
7986 (entry->needs_wakeup ? "" : "!"));
7987
7988 if (entry->is_sub_map) {
7989 iprintf("submap=0x%x, offset=0x%x\n",
7990 entry->object.sub_map,
7991 entry->offset);
7992 } else {
7993 iprintf("object=0x%x, offset=0x%x, ",
7994 entry->object.vm_object,
7995 entry->offset);
7996 printf("%sis_shared, %sneeds_copy\n",
7997 (entry->is_shared ? "" : "!"),
7998 (entry->needs_copy ? "" : "!"));
7999 }
8000
8001 db_indent -= 2;
8002}
8003
8004/*
8005 * vm_follow_map: [ debug ]
8006 */
8007void
8008vm_follow_map(
8009 vm_map_t map)
8010{
8011 register vm_map_entry_t entry;
8012 extern int db_indent;
8013
8014 iprintf("task map 0x%x:\n", map);
8015
8016 db_indent += 2;
8017
8018 for (entry = vm_map_first_entry(map);
8019 entry && entry != vm_map_to_entry(map);
8020 entry = entry->vme_next) {
8021 vm_follow_entry(entry);
8022 }
8023
8024 db_indent -= 2;
8025}
8026
8027/*
8028 * vm_map_print: [ debug ]
8029 */
8030void
8031vm_map_print(
8032 register vm_map_t map)
8033{
8034 register vm_map_entry_t entry;
8035 extern int db_indent;
8036 char *swstate;
8037
8038 iprintf("task map 0x%x:\n", map);
8039
8040 db_indent += 2;
8041
8042 vm_map_header_print(&map->hdr);
8043
8044 iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
8045 map->pmap,
8046 map->size,
8047 map->ref_count,
8048 map->hint,
8049 map->first_free);
8050
8051 iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
8052 (map->wait_for_space ? "" : "!"),
8053 (map->wiring_required ? "" : "!"),
8054 map->timestamp);
8055
8056#if TASK_SWAPPER
8057 switch (map->sw_state) {
8058 case MAP_SW_IN:
8059 swstate = "SW_IN";
8060 break;
8061 case MAP_SW_OUT:
8062 swstate = "SW_OUT";
8063 break;
8064 default:
8065 swstate = "????";
8066 break;
8067 }
8068 iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
8069#endif /* TASK_SWAPPER */
8070
8071 for (entry = vm_map_first_entry(map);
8072 entry && entry != vm_map_to_entry(map);
8073 entry = entry->vme_next) {
8074 vm_map_entry_print(entry);
8075 }
8076
8077 db_indent -= 2;
8078}
8079
8080/*
8081 * Routine: vm_map_copy_print
8082 * Purpose:
8083 * Pretty-print a copy object for ddb.
8084 */
8085
8086void
8087vm_map_copy_print(
8088 vm_map_copy_t copy)
8089{
8090 extern int db_indent;
8091 int i, npages;
8092 vm_map_entry_t entry;
8093
8094 printf("copy object 0x%x\n", copy);
8095
8096 db_indent += 2;
8097
8098 iprintf("type=%d", copy->type);
8099 switch (copy->type) {
8100 case VM_MAP_COPY_ENTRY_LIST:
8101 printf("[entry_list]");
8102 break;
8103
8104 case VM_MAP_COPY_OBJECT:
8105 printf("[object]");
8106 break;
8107
8108 case VM_MAP_COPY_KERNEL_BUFFER:
8109 printf("[kernel_buffer]");
8110 break;
8111
8112 default:
8113 printf("[bad type]");
8114 break;
8115 }
8116 printf(", offset=0x%x", copy->offset);
8117 printf(", size=0x%x\n", copy->size);
8118
8119 switch (copy->type) {
8120 case VM_MAP_COPY_ENTRY_LIST:
8121 vm_map_header_print(&copy->cpy_hdr);
8122 for (entry = vm_map_copy_first_entry(copy);
8123 entry && entry != vm_map_copy_to_entry(copy);
8124 entry = entry->vme_next) {
8125 vm_map_entry_print(entry);
8126 }
8127 break;
8128
8129 case VM_MAP_COPY_OBJECT:
8130 iprintf("object=0x%x\n", copy->cpy_object);
8131 break;
8132
8133 case VM_MAP_COPY_KERNEL_BUFFER:
8134 iprintf("kernel buffer=0x%x", copy->cpy_kdata);
8135 printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
8136 break;
8137
8138 }
8139
8140 db_indent -= 2;
8141}
8142
8143/*
8144 * db_vm_map_total_size(map) [ debug ]
8145 *
8146 * return the total virtual size (in bytes) of the map
8147 */
8148vm_size_t
8149db_vm_map_total_size(
8150 vm_map_t map)
8151{
8152 vm_map_entry_t entry;
8153 vm_size_t total;
8154
8155 total = 0;
8156 for (entry = vm_map_first_entry(map);
8157 entry != vm_map_to_entry(map);
8158 entry = entry->vme_next) {
8159 total += entry->vme_end - entry->vme_start;
8160 }
8161
8162 return total;
8163}
8164
8165#endif /* MACH_KDB */
8166
8167/*
8168 * Routine: vm_map_entry_insert
8169 *
8170 * Description: This routine inserts a new vm_entry in a locked map.
8171 */
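/*
 * Illustrative sketch (not part of this file): a typical caller holds
 * the map write-locked, has chosen [start, end) and the entry to link
 * after, and supplies the remaining attributes explicitly.  The
 * "insertion_point" name is hypothetical.
 *
 *	new_entry = vm_map_entry_insert(map, insertion_point,
 *		start, end, object, offset,
 *		FALSE,			(needs_copy)
 *		FALSE,			(is_shared)
 *		FALSE,			(in_transition)
 *		VM_PROT_DEFAULT, VM_PROT_ALL,
 *		VM_BEHAVIOR_DEFAULT, VM_INHERIT_DEFAULT, 0);
 */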
8172vm_map_entry_t
8173vm_map_entry_insert(
8174 vm_map_t map,
8175 vm_map_entry_t insp_entry,
8176 vm_offset_t start,
8177 vm_offset_t end,
8178 vm_object_t object,
8179 vm_object_offset_t offset,
8180 boolean_t needs_copy,
8181 boolean_t is_shared,
8182 boolean_t in_transition,
8183 vm_prot_t cur_protection,
8184 vm_prot_t max_protection,
8185 vm_behavior_t behavior,
8186 vm_inherit_t inheritance,
8187 unsigned wired_count)
8188{
8189 vm_map_entry_t new_entry;
8190
8191 assert(insp_entry != (vm_map_entry_t)0);
8192
8193 new_entry = vm_map_entry_create(map);
8194
8195 new_entry->vme_start = start;
8196 new_entry->vme_end = end;
8197 assert(page_aligned(new_entry->vme_start));
8198 assert(page_aligned(new_entry->vme_end));
8199
8200 new_entry->object.vm_object = object;
8201 new_entry->offset = offset;
8202 new_entry->is_shared = is_shared;
8203 new_entry->is_sub_map = FALSE;
8204 new_entry->needs_copy = needs_copy;
8205 new_entry->in_transition = in_transition;
8206 new_entry->needs_wakeup = FALSE;
8207 new_entry->inheritance = inheritance;
8208 new_entry->protection = cur_protection;
8209 new_entry->max_protection = max_protection;
8210 new_entry->behavior = behavior;
8211 new_entry->wired_count = wired_count;
8212 new_entry->user_wired_count = 0;
8213 new_entry->use_pmap = FALSE;
8214
8215 /*
8216 * Insert the new entry into the list.
8217 */
8218
8219 vm_map_entry_link(map, insp_entry, new_entry);
8220 map->size += end - start;
8221
8222 /*
8223 * Update the free space hint and the lookup hint.
8224 */
8225
8226 SAVE_HINT(map, new_entry);
8227 return new_entry;
8228}
8229
8230/*
8231 * Routine: vm_remap_extract
8232 *
8233 * Description: This routine returns a vm_entry list from a map.
8234 */
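/*
 * Illustrative sketch (not part of this file): a remap-style caller
 * extracts the entries covering [addr, addr + size) into a private
 * header and later links them into a destination map.  The names
 * "src_map" and "copy_requested" are hypothetical.
 *
 *	struct vm_map_header	map_header;
 *	vm_prot_t		cur_prot, max_prot;
 *	kern_return_t		kr;
 *
 *	kr = vm_remap_extract(src_map, addr, size, copy_requested,
 *		&map_header, &cur_prot, &max_prot,
 *		VM_INHERIT_DEFAULT, TRUE);
 */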
8235kern_return_t
8236vm_remap_extract(
8237 vm_map_t map,
8238 vm_offset_t addr,
8239 vm_size_t size,
8240 boolean_t copy,
8241 struct vm_map_header *map_header,
8242 vm_prot_t *cur_protection,
8243 vm_prot_t *max_protection,
8244 /* What, no behavior? */
8245 vm_inherit_t inheritance,
8246 boolean_t pageable)
8247{
8248 kern_return_t result;
8249 vm_size_t mapped_size;
8250 vm_size_t tmp_size;
8251 vm_map_entry_t src_entry; /* result of last map lookup */
8252 vm_map_entry_t new_entry;
8253 vm_object_offset_t offset;
8254 vm_offset_t map_address;
8255 vm_offset_t src_start; /* start of entry to map */
8256 vm_offset_t src_end; /* end of region to be mapped */
8257 vm_object_t object;
8258 vm_map_version_t version;
8259 boolean_t src_needs_copy;
8260 boolean_t new_entry_needs_copy;
8261
8262 assert(map != VM_MAP_NULL);
8263 assert(size != 0 && size == round_page(size));
8264 assert(inheritance == VM_INHERIT_NONE ||
8265 inheritance == VM_INHERIT_COPY ||
8266 inheritance == VM_INHERIT_SHARE);
8267
8268 /*
8269 * Compute start and end of region.
8270 */
8271 src_start = trunc_page(addr);
8272 src_end = round_page(src_start + size);
8273
8274 /*
8275 * Initialize map_header.
8276 */
8277 map_header->links.next = (struct vm_map_entry *)&map_header->links;
8278 map_header->links.prev = (struct vm_map_entry *)&map_header->links;
8279 map_header->nentries = 0;
8280 map_header->entries_pageable = pageable;
8281
8282 *cur_protection = VM_PROT_ALL;
8283 *max_protection = VM_PROT_ALL;
8284
8285 map_address = 0;
8286 mapped_size = 0;
8287 result = KERN_SUCCESS;
8288
8289 /*
8290 * The specified source virtual space might correspond to
8291 * multiple map entries, need to loop on them.
8292 */
8293 vm_map_lock(map);
8294 while (mapped_size != size) {
8295 vm_size_t entry_size;
8296
8297 /*
8298 * Find the beginning of the region.
8299 */
8300 if (! vm_map_lookup_entry(map, src_start, &src_entry)) {
8301 result = KERN_INVALID_ADDRESS;
8302 break;
8303 }
8304
8305 if (src_start < src_entry->vme_start ||
8306 (mapped_size && src_start != src_entry->vme_start)) {
8307 result = KERN_INVALID_ADDRESS;
8308 break;
8309 }
8310
8311 if(src_entry->is_sub_map) {
8312 result = KERN_INVALID_ADDRESS;
8313 break;
8314 }
8315
8316 tmp_size = size - mapped_size;
8317 if (src_end > src_entry->vme_end)
8318 tmp_size -= (src_end - src_entry->vme_end);
8319
8320 entry_size = (vm_size_t)(src_entry->vme_end -
8321 src_entry->vme_start);
8322
8323		if (src_entry->is_sub_map) {	/* unreachable: sub-maps rejected above */
8324 vm_map_reference(src_entry->object.sub_map);
8325 } else {
8326 object = src_entry->object.vm_object;
8327
8328 if (object == VM_OBJECT_NULL) {
8329 object = vm_object_allocate(entry_size);
8330 src_entry->offset = 0;
8331 src_entry->object.vm_object = object;
8332 } else if (object->copy_strategy !=
8333 MEMORY_OBJECT_COPY_SYMMETRIC) {
8334 /*
8335 * We are already using an asymmetric
8336 * copy, and therefore we already have
8337 * the right object.
8338 */
8339 assert(!src_entry->needs_copy);
8340 } else if (src_entry->needs_copy || object->shadowed ||
8341 (object->internal && !object->true_share &&
8342 !src_entry->is_shared &&
8343 object->size > entry_size)) {
8344
8345 vm_object_shadow(&src_entry->object.vm_object,
8346 &src_entry->offset,
8347 entry_size);
8348
8349 if (!src_entry->needs_copy &&
8350 (src_entry->protection & VM_PROT_WRITE)) {
8351 pmap_protect(vm_map_pmap(map),
8352 src_entry->vme_start,
8353 src_entry->vme_end,
8354 src_entry->protection &
8355 ~VM_PROT_WRITE);
8356 }
8357
8358 object = src_entry->object.vm_object;
8359 src_entry->needs_copy = FALSE;
8360 }
8361
8362
8363 vm_object_lock(object);
8364 object->ref_count++; /* object ref. for new entry */
8365 VM_OBJ_RES_INCR(object);
8366 if (object->copy_strategy ==
8367 MEMORY_OBJECT_COPY_SYMMETRIC) {
8368 object->copy_strategy =
8369 MEMORY_OBJECT_COPY_DELAY;
8370 }
8371 vm_object_unlock(object);
8372 }
8373
8374 offset = src_entry->offset + (src_start - src_entry->vme_start);
8375
8376 new_entry = _vm_map_entry_create(map_header);
8377 vm_map_entry_copy(new_entry, src_entry);
8378 new_entry->use_pmap = FALSE; /* clr address space specifics */
8379
8380 new_entry->vme_start = map_address;
8381 new_entry->vme_end = map_address + tmp_size;
8382 new_entry->inheritance = inheritance;
8383 new_entry->offset = offset;
8384
8385 /*
8386 * The new region has to be copied now if required.
8387 */
8388 RestartCopy:
8389 if (!copy) {
8390 src_entry->is_shared = TRUE;
8391 new_entry->is_shared = TRUE;
8392 if (!(new_entry->is_sub_map))
8393 new_entry->needs_copy = FALSE;
8394
8395 } else if (src_entry->is_sub_map) {
8396 /* make this a COW sub_map if not already */
8397 new_entry->needs_copy = TRUE;
8398 } else if (src_entry->wired_count == 0 &&
8399 vm_object_copy_quickly(&new_entry->object.vm_object,
8400 new_entry->offset,
8401 (new_entry->vme_end -
8402 new_entry->vme_start),
8403 &src_needs_copy,
8404 &new_entry_needs_copy)) {
8405
8406 new_entry->needs_copy = new_entry_needs_copy;
8407 new_entry->is_shared = FALSE;
8408
8409 /*
8410 * Handle copy_on_write semantics.
8411 */
8412 if (src_needs_copy && !src_entry->needs_copy) {
8413 vm_object_pmap_protect(object,
8414 offset,
8415 entry_size,
8416 (src_entry->is_shared ?
8417 PMAP_NULL : map->pmap),
8418 src_entry->vme_start,
8419 src_entry->protection &
8420 ~VM_PROT_WRITE);
8421
8422 src_entry->needs_copy = TRUE;
8423 }
8424 /*
8425 * Throw away the old object reference of the new entry.
8426 */
8427 vm_object_deallocate(object);
8428
8429 } else {
8430 new_entry->is_shared = FALSE;
8431
8432 /*
8433 * The map can be safely unlocked since we
8434 * already hold a reference on the object.
8435 *
8436 * Record the timestamp of the map for later
8437 * verification, and unlock the map.
8438 */
8439 version.main_timestamp = map->timestamp;
8440 vm_map_unlock(map);
8441
8442 /*
8443 * Perform the copy.
8444 */
8445 if (src_entry->wired_count > 0) {
8446 vm_object_lock(object);
8447 result = vm_object_copy_slowly(
8448 object,
8449 offset,
8450 entry_size,
8451 THREAD_UNINT,
8452 &new_entry->object.vm_object);
8453
8454 new_entry->offset = 0;
8455 new_entry->needs_copy = FALSE;
8456 } else {
8457 result = vm_object_copy_strategically(
8458 object,
8459 offset,
8460 entry_size,
8461 &new_entry->object.vm_object,
8462 &new_entry->offset,
8463 &new_entry_needs_copy);
8464
8465 new_entry->needs_copy = new_entry_needs_copy;
8466 }
8467
8468 /*
8469 * Throw away the old object reference of the new entry.
8470 */
8471 vm_object_deallocate(object);
8472
8473 if (result != KERN_SUCCESS &&
8474 result != KERN_MEMORY_RESTART_COPY) {
8475 _vm_map_entry_dispose(map_header, new_entry);
8476 break;
8477 }
8478
8479 /*
8480 * Verify that the map has not substantially
8481 * changed while the copy was being made.
8482 */
8483
8484 vm_map_lock(map); /* Increments timestamp once! */
8485 if (version.main_timestamp + 1 != map->timestamp) {
8486 /*
8487 * Simple version comparison failed.
8488 *
8489 * Retry the lookup and verify that the
8490 * same object/offset are still present.
8491 */
8492 vm_object_deallocate(new_entry->
8493 object.vm_object);
8494 _vm_map_entry_dispose(map_header, new_entry);
8495 if (result == KERN_MEMORY_RESTART_COPY)
8496 result = KERN_SUCCESS;
8497 continue;
8498 }
8499
8500 if (result == KERN_MEMORY_RESTART_COPY) {
8501 vm_object_reference(object);
8502 goto RestartCopy;
8503 }
8504 }
8505
8506 _vm_map_entry_link(map_header,
8507 map_header->links.prev, new_entry);
8508
8509 *cur_protection &= src_entry->protection;
8510 *max_protection &= src_entry->max_protection;
8511
8512 map_address += tmp_size;
8513 mapped_size += tmp_size;
8514 src_start += tmp_size;
8515
8516 } /* end while */
8517
8518 vm_map_unlock(map);
8519 if (result != KERN_SUCCESS) {
8520 /*
8521 * Free all allocated elements.
8522 */
8523 for (src_entry = map_header->links.next;
8524 src_entry != (struct vm_map_entry *)&map_header->links;
8525 src_entry = new_entry) {
8526 new_entry = src_entry->vme_next;
8527 _vm_map_entry_unlink(map_header, src_entry);
8528 vm_object_deallocate(src_entry->object.vm_object);
8529 _vm_map_entry_dispose(map_header, src_entry);
8530 }
8531 }
8532 return result;
8533}
8534
8535/*
8536 * Routine: vm_remap
8537 *
8538 * Map a portion of a task's address space.
8539 * The mapped region must not overlap more than
8540 * one vm memory object. Protection and
8541 * inheritance attributes remain the same
8542 * as in the original task and are returned as out parameters.
8543 * Source and target task can be identical.
8544 * Other attributes are the same as for vm_map().
8545 */
8546kern_return_t
8547vm_remap(
8548 vm_map_t target_map,
8549 vm_offset_t *address,
8550 vm_size_t size,
8551 vm_offset_t mask,
8552 boolean_t anywhere,
8553 vm_map_t src_map,
8554 vm_offset_t memory_address,
8555 boolean_t copy,
8556 vm_prot_t *cur_protection,
8557 vm_prot_t *max_protection,
8558 vm_inherit_t inheritance)
8559{
8560 kern_return_t result;
8561 vm_map_entry_t entry;
8562 vm_map_entry_t insp_entry;
8563 vm_map_entry_t new_entry;
8564 struct vm_map_header map_header;
8565
8566 if (target_map == VM_MAP_NULL)
8567 return KERN_INVALID_ARGUMENT;
8568
8569 switch (inheritance) {
8570 case VM_INHERIT_NONE:
8571 case VM_INHERIT_COPY:
8572 case VM_INHERIT_SHARE:
8573 if (size != 0 && src_map != VM_MAP_NULL)
8574 break;
8575 /*FALL THRU*/
8576 default:
8577 return KERN_INVALID_ARGUMENT;
8578 }
8579
8580 size = round_page(size);
8581
8582 result = vm_remap_extract(src_map, memory_address,
8583 size, copy, &map_header,
8584 cur_protection,
8585 max_protection,
8586 inheritance,
8587 target_map->hdr.
8588 entries_pageable);
8589
8590 if (result != KERN_SUCCESS) {
8591 return result;
8592 }
8593
8594 /*
8595 * Allocate/check a range of free virtual address
8596 * space for the target
8597 */
8598 *address = trunc_page(*address);
8599 vm_map_lock(target_map);
8600 result = vm_remap_range_allocate(target_map, address, size,
8601 mask, anywhere, &insp_entry);
8602
8603 for (entry = map_header.links.next;
8604 entry != (struct vm_map_entry *)&map_header.links;
8605 entry = new_entry) {
8606 new_entry = entry->vme_next;
8607 _vm_map_entry_unlink(&map_header, entry);
8608 if (result == KERN_SUCCESS) {
8609 entry->vme_start += *address;
8610 entry->vme_end += *address;
8611 vm_map_entry_link(target_map, insp_entry, entry);
8612 insp_entry = entry;
8613 } else {
8614 if (!entry->is_sub_map) {
8615 vm_object_deallocate(entry->object.vm_object);
8616 } else {
8617 vm_map_deallocate(entry->object.sub_map);
8618 }
8619 _vm_map_entry_dispose(&map_header, entry);
8620 }
8621 }
8622
8623 if (result == KERN_SUCCESS) {
8624 target_map->size += size;
8625 SAVE_HINT(target_map, insp_entry);
8626 }
8627 vm_map_unlock(target_map);
8628
8629 if (result == KERN_SUCCESS && target_map->wiring_required)
8630 result = vm_map_wire(target_map, *address,
8631 *address + size, *cur_protection, TRUE);
8632 return result;
8633}
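/*
 * Illustrative sketch (not part of the original source): remapping a page-
 * aligned region of src_map into target_map at a kernel-chosen address.
 * With copy == FALSE the two maps share the same backing objects; the
 * resulting address and the effective cur/max protections are returned to
 * the caller.  The wrapper name and variables are assumptions for the
 * example only.
 */
#if 0	/* sketch only */
static kern_return_t
example_share_region(
	vm_map_t	target_map,
	vm_map_t	src_map,
	vm_offset_t	src_addr,
	vm_size_t	len,
	vm_offset_t	*out_addr)
{
	vm_prot_t	cur_prot;
	vm_prot_t	max_prot;

	*out_addr = 0;				/* hint; "anywhere" below */
	return vm_remap(target_map, out_addr, round_page(len),
			(vm_offset_t) 0,	/* no alignment mask */
			TRUE,			/* anywhere */
			src_map, src_addr,
			FALSE,			/* share, do not copy */
			&cur_prot, &max_prot,
			VM_INHERIT_SHARE);
}
#endif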
8634
8635/*
8636 * Routine: vm_remap_range_allocate
8637 *
8638 * Description:
8639 * Allocate a range in the specified virtual address map.
8640 * Returns the address and the map entry just before the allocated
8641 * range.
8642 *
8643 * Map must be locked.
8644 */
8645
8646kern_return_t
8647vm_remap_range_allocate(
8648 vm_map_t map,
8649 vm_offset_t *address, /* IN/OUT */
8650 vm_size_t size,
8651 vm_offset_t mask,
8652 boolean_t anywhere,
8653 vm_map_entry_t *map_entry) /* OUT */
8654{
8655 register vm_map_entry_t entry;
8656 register vm_offset_t start;
8657 register vm_offset_t end;
8658 kern_return_t result = KERN_SUCCESS;
8659
8660 StartAgain: ;
8661
8662 start = *address;
8663
8664 if (anywhere)
8665 {
8666 /*
8667 * Calculate the first possible address.
8668 */
8669
8670 if (start < map->min_offset)
8671 start = map->min_offset;
8672 if (start > map->max_offset)
8673 return(KERN_NO_SPACE);
8674
8675 /*
8676 * Look for the first possible address;
8677 * if there's already something at this
8678 * address, we have to start after it.
8679 */
8680
8681 assert(first_free_is_valid(map));
8682 if (start == map->min_offset) {
8683 if ((entry = map->first_free) != vm_map_to_entry(map))
8684 start = entry->vme_end;
8685 } else {
8686 vm_map_entry_t tmp_entry;
8687 if (vm_map_lookup_entry(map, start, &tmp_entry))
8688 start = tmp_entry->vme_end;
8689 entry = tmp_entry;
8690 }
8691
8692 /*
8693 * In any case, the "entry" always precedes
8694 * the proposed new region throughout the
8695 * loop:
8696 */
8697
8698 while (TRUE) {
8699 register vm_map_entry_t next;
8700
8701 /*
8702 * Find the end of the proposed new region.
8703 * Be sure we didn't go beyond the end, or
8704 * wrap around the address.
8705 */
8706
8707 end = ((start + mask) & ~mask);
8708 if (end < start)
8709 return(KERN_NO_SPACE);
8710 start = end;
8711 end += size;
8712
8713 if ((end > map->max_offset) || (end < start)) {
8714 if (map->wait_for_space) {
8715 if (size <= (map->max_offset -
8716 map->min_offset)) {
8717 assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
8718 vm_map_unlock(map);
8719 thread_block((void (*)(void))0);
8720 vm_map_lock(map);
8721 goto StartAgain;
8722 }
8723 }
8724
8725 return(KERN_NO_SPACE);
8726 }
8727
8728 /*
8729 * If there are no more entries, we must win.
8730 */
8731
8732 next = entry->vme_next;
8733 if (next == vm_map_to_entry(map))
8734 break;
8735
8736 /*
8737 * If there is another entry, it must be
8738 * after the end of the potential new region.
8739 */
8740
8741 if (next->vme_start >= end)
8742 break;
8743
8744 /*
8745 * Didn't fit -- move to the next entry.
8746 */
8747
8748 entry = next;
8749 start = entry->vme_end;
8750 }
8751 *address = start;
8752 } else {
8753 vm_map_entry_t temp_entry;
8754
8755 /*
8756 * Verify that:
8757 * the address doesn't itself violate
8758 * the mask requirement.
8759 */
8760
8761 if ((start & mask) != 0)
8762 return(KERN_NO_SPACE);
8763
8764
8765 /*
8766 * ... the address is within bounds
8767 */
8768
8769 end = start + size;
8770
8771 if ((start < map->min_offset) ||
8772 (end > map->max_offset) ||
8773 (start >= end)) {
8774 return(KERN_INVALID_ADDRESS);
8775 }
8776
8777 /*
8778 * ... the starting address isn't allocated
8779 */
8780
8781 if (vm_map_lookup_entry(map, start, &temp_entry))
8782 return(KERN_NO_SPACE);
8783
8784 entry = temp_entry;
8785
8786 /*
8787 * ... the next region doesn't overlap the
8788 * end point.
8789 */
8790
8791 if ((entry->vme_next != vm_map_to_entry(map)) &&
8792 (entry->vme_next->vme_start < end))
8793 return(KERN_NO_SPACE);
8794 }
8795 *map_entry = entry;
8796 return(KERN_SUCCESS);
8797}
8798
8799/*
8800 * vm_map_switch:
8801 *
8802 * Set the address map for the current thr_act to the specified map
8803 */
8804
8805vm_map_t
8806vm_map_switch(
8807 vm_map_t map)
8808{
8809 int mycpu;
8810 thread_act_t thr_act = current_act();
8811 vm_map_t oldmap = thr_act->map;
8812
8813 mp_disable_preemption();
8814 mycpu = cpu_number();
8815
8816 /*
8817 * Deactivate the current map and activate the requested map
8818 */
8819 PMAP_SWITCH_USER(thr_act, map, mycpu);
8820
8821 mp_enable_preemption();
8822 return(oldmap);
8823}
8824
8825
8826/*
8827 * Routine: vm_map_write_user
8828 *
8829 * Description:
8830 * Copy out data from kernel space into space in the
8831 * destination map. The space must already exist in the
8832 * destination map.
8833 * NOTE: This routine should only be called by threads
8834 * which can block on a page fault, i.e. kernel-mode user
8835 * threads.
8836 *
8837 */
8838kern_return_t
8839vm_map_write_user(
8840 vm_map_t map,
8841 vm_offset_t src_addr,
8842 vm_offset_t dst_addr,
8843 vm_size_t size)
8844{
8845 thread_act_t thr_act = current_act();
8846 kern_return_t kr = KERN_SUCCESS;
8847
8848 if(thr_act->map == map) {
8849 if (copyout((char *)src_addr, (char *)dst_addr, size)) {
8850 kr = KERN_INVALID_ADDRESS;
8851 }
8852 } else {
8853 vm_map_t oldmap;
8854
8855 /* take on the identity of the target map while doing */
8856 /* the transfer */
8857
8858 vm_map_reference(map);
8859 oldmap = vm_map_switch(map);
8860 if (copyout((char *)src_addr, (char *)dst_addr, size)) {
8861 kr = KERN_INVALID_ADDRESS;
8862 }
8863 vm_map_switch(oldmap);
8864 vm_map_deallocate(map);
8865 }
8866 return kr;
8867}
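/*
 * Illustrative sketch (not part of the original source): pushing a small
 * kernel-resident structure out to a user buffer in an arbitrary task's
 * map.  The structure layout, wrapper name and variables are assumptions
 * for the example; as noted above, the caller must be able to block on a
 * page fault.
 */
#if 0	/* sketch only */
static kern_return_t
example_push_to_user(
	vm_map_t	user_map,
	vm_offset_t	user_buf)
{
	struct { int count; int flags; } info = { 0, 0 };

	return vm_map_write_user(user_map,
			(vm_offset_t) &info,	/* kernel source */
			user_buf,		/* user destination */
			sizeof info);
}
#endif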
8868
8869/*
8870 * Routine: vm_map_read_user
8871 *
8872 * Description:
8873 * Copy in data from a user-space source map into the
8874 * kernel map. The space must already exist in the
8875 * kernel map.
8876 * NOTE: This routine should only be called by threads
8877 * which can block on a page fault, i.e. kernel-mode user
8878 * threads.
8879 *
8880 */
8881kern_return_t
8882vm_map_read_user(
8883 vm_map_t map,
8884 vm_offset_t src_addr,
8885 vm_offset_t dst_addr,
8886 vm_size_t size)
8887{
8888 thread_act_t thr_act = current_act();
8889 kern_return_t kr = KERN_SUCCESS;
8890
8891 if(thr_act->map == map) {
8892 if (copyin((char *)src_addr, (char *)dst_addr, size)) {
8893 kr = KERN_INVALID_ADDRESS;
8894 }
8895 } else {
8896 vm_map_t oldmap;
8897
8898 /* take on the identity of the target map while doing */
8899 /* the transfer */
8900
8901 vm_map_reference(map);
8902 oldmap = vm_map_switch(map);
8903 if (copyin((char *)src_addr, (char *)dst_addr, size)) {
8904 kr = KERN_INVALID_ADDRESS;
8905 }
8906 vm_map_switch(oldmap);
8907 vm_map_deallocate(map);
8908 }
8909 return kr;
8910}
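/*
 * Illustrative sketch (not part of the original source): the mirror image
 * of the write case above -- pulling a user-supplied parameter block from
 * an arbitrary task's map into a kernel-resident buffer.  The parameter
 * block layout and names are assumptions for the example.
 */
#if 0	/* sketch only */
static kern_return_t
example_pull_from_user(
	vm_map_t	user_map,
	vm_offset_t	user_args)
{
	struct { vm_offset_t addr; vm_size_t len; } args;

	return vm_map_read_user(user_map,
			user_args,		/* user source */
			(vm_offset_t) &args,	/* kernel destination */
			sizeof args);
}
#endif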
8911
8912/* Takes existing source and destination sub-maps and clones the contents of */
8913/* the source map into the destination map. */
8914
8915kern_return_t
8916vm_region_clone(
8917 ipc_port_t src_region,
8918 ipc_port_t dst_region)
8919{
8920 vm_named_entry_t src_object;
8921 vm_named_entry_t dst_object;
8922 vm_map_t src_map;
8923 vm_map_t dst_map;
8924 vm_offset_t addr;
8925 vm_offset_t max_off;
8926 vm_map_entry_t entry;
8927 vm_map_entry_t new_entry;
8928 vm_map_entry_t insert_point;
8929
8930 src_object = (vm_named_entry_t)src_region->ip_kobject;
8931 dst_object = (vm_named_entry_t)dst_region->ip_kobject;
8932 if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) {
8933 return KERN_INVALID_ARGUMENT;
8934 }
8935 src_map = (vm_map_t)src_object->backing.map;
8936 dst_map = (vm_map_t)dst_object->backing.map;
8937 /* destination map is assumed to be unavailable to any other */
8938 /* activity. i.e. it is new */
8939 vm_map_lock(src_map);
8940 if((src_map->min_offset != dst_map->min_offset)
8941 || (src_map->max_offset != dst_map->max_offset)) {
8942 vm_map_unlock(src_map);
8943 return KERN_INVALID_ARGUMENT;
8944 }
8945 addr = src_map->min_offset;
8946 vm_map_lookup_entry(dst_map, addr, &entry);
8947 if(entry == vm_map_to_entry(dst_map)) {
8948 entry = entry->vme_next;
8949 }
8950 if(entry == vm_map_to_entry(dst_map)) {
8951 max_off = src_map->max_offset;
8952 } else {
8953 max_off = entry->vme_start;
8954 }
8955 vm_map_lookup_entry(src_map, addr, &entry);
8956 if(entry == vm_map_to_entry(src_map)) {
8957 entry = entry->vme_next;
8958 }
8959 vm_map_lookup_entry(dst_map, addr, &insert_point);
8960 while((entry != vm_map_to_entry(src_map)) &&
8961 (entry->vme_end <= max_off)) {
8962 addr = entry->vme_start;
8963 new_entry = vm_map_entry_create(dst_map);
8964 vm_map_entry_copy(new_entry, entry);
8965 vm_map_entry_link(dst_map, insert_point, new_entry);
8966 insert_point = new_entry;
8967 if (entry->object.vm_object != VM_OBJECT_NULL) {
8968 if (new_entry->is_sub_map) {
8969 vm_map_reference(new_entry->object.sub_map);
8970 } else {
8971 vm_object_reference(
8972 new_entry->object.vm_object);
8973 }
8974 }
8975 dst_map->size += new_entry->vme_end - new_entry->vme_start;
8976 entry = entry->vme_next;
8977 }
8978 vm_map_unlock(src_map);
8979 return KERN_SUCCESS;
8980}
8981
8982/*
8983 * Export routines to other components for the things we access locally through
8984 * macros.
8985 */
8986#undef current_map
8987vm_map_t
8988current_map(void)
8989{
8990 return (current_map_fast());
8991}
8992
8993/*
8994 * vm_map_check_protection:
8995 *
8996 * Assert that the target map allows the specified
8997 * privilege on the entire address region given.
8998 * The entire region must be allocated.
8999 */
9000boolean_t vm_map_check_protection(
9001	register vm_map_t	map,
9002	register vm_offset_t	start,
9003	register vm_offset_t	end,
9004	register vm_prot_t	protection)
9005{
9006 register vm_map_entry_t entry;
9007 vm_map_entry_t tmp_entry;
9008
9009 vm_map_lock(map);
9010
9011 if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
9012 {
9013 vm_map_unlock(map);
9014 return (FALSE);
9015 }
9016
9017 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
9018 vm_map_unlock(map);
9019 return(FALSE);
9020 }
9021
9022 entry = tmp_entry;
9023
9024 while (start < end) {
9025 if (entry == vm_map_to_entry(map)) {
9026 vm_map_unlock(map);
9027 return(FALSE);
9028 }
9029
9030 /*
9031 * No holes allowed!
9032 */
9033
9034 if (start < entry->vme_start) {
9035 vm_map_unlock(map);
9036 return(FALSE);
9037 }
9038
9039 /*
9040 * Check protection associated with entry.
9041 */
9042
9043 if ((entry->protection & protection) != protection) {
9044 vm_map_unlock(map);
9045 return(FALSE);
9046 }
9047
9048 /* go to next entry */
9049
9050 start = entry->vme_end;
9051 entry = entry->vme_next;
9052 }
9053 vm_map_unlock(map);
9054 return(TRUE);
9055}
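/*
 * Illustrative sketch (not part of the original source): verifying that an
 * entire user range is mapped with read/write access before operating on
 * it.  The wrapper name and the choice of error code are assumptions for
 * the example.
 */
#if 0	/* sketch only */
static kern_return_t
example_check_rw(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	len)
{
	if (!vm_map_check_protection(map,
			trunc_page(start),
			round_page(start + len),
			VM_PROT_READ | VM_PROT_WRITE))
		return KERN_PROTECTION_FAILURE;
	return KERN_SUCCESS;
}
#endif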