]> git.saurik.com Git - apple/xnu.git/blob - libsa/malloc.c
xnu-1228.5.20.tar.gz
[apple/xnu.git] / libsa / malloc.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
#include <stdint.h>
#include <string.h>

#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/queue.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <kern/assert.h>
#include <kern/zalloc.h>
#include <kern/debug.h>

#include <vm/vm_kern.h>

#include "libsa/malloc.h"
43
44 /*********************************************************************
45 * Structure for a client memory block. Contains linked-list pointers,
46 * a size field giving the TOTAL size of the block, including this
47 * header, and the address of the client's block. The client block
48 * field is guaranteed to lie on a 16-byte boundary.
49 *********************************************************************/
typedef struct malloc_block {

	struct malloc_block *malFwd;	/* Next block on the circular allocation list */
	struct malloc_block *malBwd;	/* Previous block on the list */
	void *malActl;			/* Actual (unaligned) address kalloc() returned; passed to kfree() */
	unsigned int malSize;		/* TOTAL size given to kalloc(): client bytes + header + padding */
} malloc_block;
57
/* Anchor of the circular doubly-linked list of live allocations; points to
 * itself when the list is empty.  malloc_reset() poisons it after teardown. */
static malloc_block malAnchor = {&malAnchor, &malAnchor, NULL, 0};

static int malInited = 0;	/* Nonzero once malloc_init() has allocated the lock */
static mutex_t *malloc_lock;	/* Serializes all list manipulation around malAnchor */
62
63 __private_extern__
64 void * malloc(size_t size) {
65
66 unsigned int nsize;
67 unsigned int nmem, rmem;
68 malloc_block *amem;
69
70 assert(malInited);
71
72 nsize = size + sizeof(malloc_block) + 15; /* Make sure we get enough to fit */
73
74 nmem = (unsigned int)kalloc(nsize); /* Get some */
75 if(!nmem) { /* Got any? */
76 panic("malloc: no memory for a %08X sized request\n", nsize);
77 }
78
79 rmem = (nmem + 15) & -16; /* Round to 16 byte boundary */
80 amem = (malloc_block *)rmem; /* Point to the block */
81 amem->malActl = (void *)nmem; /* Set the actual address */
82 amem->malSize = nsize; /* Size */
83
84 mutex_lock(malloc_lock);
85
86 amem->malFwd = malAnchor.malFwd; /* Move anchor to our forward */
87 amem->malBwd = &malAnchor; /* We point back to anchor */
88 malAnchor.malFwd->malBwd = amem; /* The old forward's back points to us */
89 malAnchor.malFwd = amem; /* Now we point the anchor to us */
90
91 mutex_unlock(malloc_lock); /* Unlock now */
92
93 return (void *)(rmem + 16); /* Return the block */
94
95 } /* malloc() */
96
97
98 /*********************************************************************
99 * free()
100 *
101 *********************************************************************/
102 __private_extern__
103 void free(void * address) {
104
105
106 malloc_block *amem, *fore, *aft;
107
108 if(!(unsigned int)address) return; /* Leave if they try to free nothing */
109
110
111 amem = (malloc_block *)((unsigned int)address - sizeof(malloc_block)); /* Point to the header */
112
113 mutex_lock(malloc_lock);
114
115 fore = amem->malFwd; /* Get the guy in front */
116 aft = amem->malBwd; /* And the guy behind */
117 fore->malBwd = aft; /* The next guy's previous is now my previous */
118 aft->malFwd = fore; /* The previous guy's forward is now mine */
119
120 mutex_unlock(malloc_lock); /* Unlock now */
121
122 kfree(amem->malActl, amem->malSize); /* Toss it */
123
124 return;
125
126 } /* free() */
127
/*********************************************************************
 * malloc_init()
 *
 * Allocate the mutual exclusion lock that protects malloc's data,
 * and mark the allocator ready for use.
 *********************************************************************/
__private_extern__ void
malloc_init(void)
{
	malloc_lock = mutex_alloc(0);
	malInited = 1;
}
139
140
141 /*********************************************************************
142 * malloc_reset()
143 *
144 * Walks through the list of VM-allocated regions, destroying them
145 * all. Any subsequent access by clients to allocated data will cause
146 * a segmentation fault.
147 *********************************************************************/
148 __private_extern__
149 void malloc_reset(void) {
150
151 malloc_block *amem, *bmem;
152
153 mutex_lock(malloc_lock);
154
155 amem = malAnchor.malFwd; /* Get the first one */
156
157 while(amem != &malAnchor) { /* Go until we hit the anchor */
158
159 bmem = amem->malFwd; /* Next one */
160 kfree(amem->malActl, amem->malSize); /* Toss it */
161 amem = bmem; /* Skip to it */
162
163 }
164
165 malAnchor.malFwd = (struct malloc_block *) 0x666; /* Cause a fault if we try again */
166 malAnchor.malBwd = (struct malloc_block *) 0x666; /* Cause a fault if we try again */
167
168 mutex_unlock(malloc_lock); /* Unlock now */
169
170 mutex_free(malloc_lock);
171
172 #ifdef MALLOC_RESET_GC
173 /* Force garbage collection of zones, since we've thrashed through a lot of memory */
174 zone_gc();
175 #endif
176
177 return;
178
179 } /* malloc_reset() */
180
181
182 /*********************************************************************
183 * realloc()
184 *
185 * This function simply allocates a new block and copies the existing
186 * data into it. Nothing too clever here, as cleanup and efficient
187 * memory usage are not important in this allocator package.
188 *********************************************************************/
189 __private_extern__
190 void * realloc(void * address, size_t new_client_size) {
191 void * new_address;
192 malloc_block *amem;
193
194 amem = (malloc_block *)((unsigned int)address - sizeof(malloc_block)); /* Point to allocation block */
195
196 new_address = malloc(new_client_size); /* get a new one */
197 if(!new_address) { /* Did we get it? */
198 panic("realloc: can not reallocate one of %08X size\n", new_client_size);
199 }
200
201 memcpy(new_address, address, amem->malSize - sizeof(malloc_block)); /* Copy the old in */
202
203 free(address); /* Toss the old one */
204
205 return new_address;
206
207 } /* realloc() */
208
209 #ifdef MALLOC_KLD_VM_ALLOCATE
210 #undef vm_allocate
211 #undef vm_deallocate
212
213 /*
214 * Wrap vm_allocate calls made by kld in malloc/free so that the memory
215 * is all released when we jettison kld. Make other VM calls used by kld
216 * no-op, since we don't need them.
217 */
218 __private_extern__
219 kern_return_t vm_allocate(vm_map_t target_task, vm_address_t *address, vm_size_t size, int flags)
220 {
221 assert(flags & VM_FLAGS_ANYWHERE);
222 assert(target_task == kernel_map);
223
224 *address = (vm_address_t)malloc(size);
225 bzero(*address, size);
226
227 return KERN_SUCCESS;
228 }
229
230 __private_extern__
231 kern_return_t vm_deallocate(vm_map_t target_task, vm_address_t address, vm_size_t size)
232 {
233 free(address);
234 return KERN_SUCCESS;
235 }
236
/* Deliberate no-op: kld does not need page-protection changes here
 * (see the wrapper rationale comment above this #ifdef section). */
__private_extern__
kern_return_t vm_protect(vm_map_t target_task, vm_address_t address, vm_size_t size, boolean_t set_maximum, vm_prot_t new_protection)
{
	return KERN_SUCCESS;
}
242
/* Deliberate no-op: kld does not need memory synchronization; report
 * success so callers proceed normally. */
__private_extern__
kern_return_t vm_msync(vm_map_t target_task, vm_address_t address, vm_size_t size, vm_sync_t sync_flags)
{
	return KERN_SUCCESS;
}
248 #endif