/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_map_store_rb.h>

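/*
 * RB_GENERATE expands the red-black tree operations (RB_INSERT, RB_REMOVE,
 * RB_FIND, ...) for trees of struct vm_map_store nodes rooted at a
 * struct rb_head, linked through their "entry" field and ordered by
 * rb_node_compare() below.  The RB_* macros follow the BSD <sys/tree.h>
 * conventions; it is assumed they are pulled in via vm_map_store_rb.h.
 */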
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

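/*
 * Recover the vm_map_entry that embeds a given vm_map_store node.  The
 * arithmetic assumes the store field sits immediately after the struct
 * vm_map_links at the start of each entry, so stepping back by
 * sizeof(struct vm_map_links) yields the enclosing entry.
 */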
#define VME_FOR_STORE( store)	\
	(vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))

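/*
 * Reset a map header's red-black tree store to an empty tree.
 */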
void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
	RB_INIT(&(hdr->rb_head_store));
}

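/*
 * Ordering predicate handed to RB_GENERATE: entries are keyed by their
 * start address.  Returning 0 whenever the node's start falls inside the
 * parent's [vme_start, vme_end) range makes overlapping entries compare
 * as equal, so an RB_INSERT of a range that collides with an existing
 * entry fails (returning the colliding node) instead of creating a
 * duplicate.
 */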
int rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
	vm_map_entry_t vme_c;
	vm_map_entry_t vme_p;

	vme_c = VME_FOR_STORE(node);
	vme_p = VME_FOR_STORE(parent);
	if (vme_c->vme_start < vme_p->vme_start)
		return -1;
	if (vme_c->vme_start >= vme_p->vme_end)
		return 1;
	return 0;
}

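/*
 * Debugging aid: look *vm_entry up in the map's tree and panic with either
 * the missing entry (plus the entry that was wrongly returned, *wrong_vme)
 * or with the node that was found and its left and right children.  Both
 * paths panic, so this is presumably only called once an inconsistency
 * has already been detected elsewhere.
 */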
void vm_map_store_walk_rb( vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = *vm_entry;

	rb_entry = RB_FIND( rb_head, &(hdr.rb_head_store), &(cur->store));
	if(rb_entry == NULL)
		panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
	else
		panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry,entry)), VME_FOR_STORE(RB_RIGHT(rb_entry,entry)));
}

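/*
 * Find the map entry containing "address".  Descends the tree from the
 * root: on a hit, *vm_entry is set to the containing entry and TRUE is
 * returned; on a miss, *vm_entry is set to the closest preceding entry,
 * or to the map header (vm_map_to_entry()) if the address precedes every
 * entry, and FALSE is returned.  The caller is assumed to hold the map
 * lock; nothing in this routine enforces that.
 */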
boolean_t vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = vm_map_to_entry(map);
	vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store*)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if(cur == VM_MAP_ENTRY_NULL)
			panic("no entry");
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				/* address falls within [vme_start, vme_end): exact match */
				*vm_entry = cur;
				return TRUE;
			}
			/* address lies beyond this entry: remember it as the
			 * closest preceding entry and descend to the right */
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			/* address precedes this entry: descend to the left */
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if( prev == VM_MAP_ENTRY_NULL){
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}

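/*
 * Insert "entry" into the header's tree.  The list-neighbor hint
 * "after_where" is unused here because the tree is ordered purely by
 * address.  RB_INSERT returning non-NULL means an existing entry already
 * overlaps this one, which should be impossible, so it is fatal.
 */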
void vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;
	if((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}

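/*
 * Remove "entry" from the header's tree, first verifying with RB_FIND
 * that it is actually linked; attempting to unlink an entry that is not
 * in the tree is fatal.
 */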
void vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND( rb_head, rbh, store);
	if(rb_entry == NULL)
		panic("NO ENTRY TO DELETE");
	RB_REMOVE( rb_head, rbh, store );
}

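/*
 * Link every entry of a vm_map_copy into the map's tree, walking the
 * copy's entry list from vm_map_copy_first_entry() until the list wraps
 * back to the copy header or nentries entries have been inserted.  As
 * with a single link, a collision with an existing entry is fatal.  When
 * MAP_ENTRY_INSERTION_DEBUG is configured, a backtrace of the insertion
 * site is saved in each entry.
 */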
void vm_map_store_copy_insert_rb( vm_map_t map, __unused vm_map_entry_t after_where, vm_map_copy_t copy)
{
	struct vm_map_header *mapHdr = &(map->hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	vm_map_entry_t entry = vm_map_copy_first_entry(copy);
	int inserted = 0, nentries = copy->cpy_hdr.nentries;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		vm_map_entry_t prev = entry;
		store = &(entry->store);
		if( RB_INSERT( rb_head, rbh, store ) != NULL){
			panic("VMSCIR1: INSERT FAILED: %d: %p, %p, %p, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx", inserted, prev, entry, vm_map_copy_to_entry(copy),
			    (uintptr_t)prev->vme_start, (uintptr_t)prev->vme_end, (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
			    (uintptr_t)(VME_FOR_STORE(rbh->rbh_root))->vme_start, (uintptr_t)(VME_FOR_STORE(rbh->rbh_root))->vme_end);
		} else {
#if MAP_ENTRY_INSERTION_DEBUG
			/* record a backtrace of the insertion site in the entry */
			fastbacktrace(&entry->vme_insertion_bt[0],
			    (sizeof (entry->vme_insertion_bt) / sizeof (uintptr_t)));
#endif
			entry = entry->vme_next;
			inserted++;
			nentries--;
		}
	}
}

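/*
 * Undo bookkeeping for a copy: remove up to "nentries" entries, starting
 * at "entry" and following the copy's entry list, from the tree rooted in
 * the copy's own header (copy->cpy_hdr).
 */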
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
	struct vm_map_header *mapHdr = &(copy->cpy_hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	int deleted = 0;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		store = &(entry->store);
		RB_REMOVE( rb_head, rbh, store );
		entry = entry->vme_next;
		deleted++;
		nentries--;
	}
}

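/*
 * No-op for the tree-based store.  The linked-list store presumably keeps
 * a cached "first free" hint that must be refreshed after map changes;
 * the red-black tree keeps no such hint, so this exists only to satisfy
 * the common vm_map_store interface.
 */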
void update_first_free_rb( __unused vm_map_t map, __unused vm_map_entry_t entry)
{
	return;
}
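
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use the lookup routine above.  Only the functions and fields referenced
 * in this file are relied upon; the surrounding locking and the variable
 * names are assumptions.
 *
 *	vm_map_entry_t entry;
 *
 *	// with the map lock held by the caller
 *	if (vm_map_store_lookup_entry_rb(map, addr, &entry)) {
 *		// addr lies within [entry->vme_start, entry->vme_end)
 *	} else {
 *		// entry is the closest entry preceding addr (or the map
 *		// header), e.g. a hint for inserting a new entry at addr
 *	}
 */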