/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>

#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/queue.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <kern/assert.h>
#include <kern/zalloc.h>
#include <kern/debug.h>

#include <vm/vm_kern.h>

#include "libsa/malloc.h"

1c79356b
A
44/*********************************************************************
45* Structure for a client memory block. Contains linked-list pointers,
46* a size field giving the TOTAL size of the block, including this
47* header, and the address of the client's block. The client block
48* field is guaranteed to lie on a 16-byte boundary.
49*********************************************************************/
50typedef struct malloc_block {
1c79356b 51
55e303ae
A
52 struct malloc_block *malFwd;
53 struct malloc_block *malBwd;
91447636 54 void *malActl;
55e303ae 55 unsigned int malSize;
55e303ae 56} malloc_block;
de355530 57
2d21ac55 58static malloc_block malAnchor = {&malAnchor, &malAnchor, NULL, 0};
de355530 59
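/*
 * Illustrative sketch (hypothetical helper, assuming the 32-bit layout where
 * sizeof(malloc_block) == 16): shows how a client pointer returned by
 * malloc() below relates to its malloc_block header, mirroring the header
 * recovery that free() performs.
 */
#if 0
static void malloc_layout_example(void)
{
	void *client = malloc(100);					/* Client block, 16-byte aligned */
	malloc_block *hdr = (malloc_block *)((unsigned int)client - sizeof(malloc_block));

	assert(((unsigned int)client & 15) == 0);			/* Client block lies on a 16-byte boundary */
	assert(hdr->malSize >= 100 + sizeof(malloc_block));		/* Total size covers header plus request */

	free(client);
}
#endif
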
static int malInited = 0;
static mutex_t *malloc_lock;

__private_extern__
void * malloc(size_t size) {

	unsigned int nsize;
	unsigned int nmem, rmem;
	malloc_block *amem;

	assert(malInited);

	nsize = size + sizeof(malloc_block) + 15;	/* Make sure we get enough to fit */

	nmem = (unsigned int)kalloc(nsize);		/* Get some */
	if(!nmem) {					/* Got any? */
		panic("malloc: no memory for a %08X sized request\n", nsize);
	}

	rmem = (nmem + 15) & -16;			/* Round to 16 byte boundary */
	amem = (malloc_block *)rmem;			/* Point to the block */
	amem->malActl = (void *)nmem;			/* Remember the actual kalloc() address */
	amem->malSize = nsize;				/* Remember the total size */

	mutex_lock(malloc_lock);

	amem->malFwd = malAnchor.malFwd;		/* Our forward is the anchor's old forward */
	amem->malBwd = &malAnchor;			/* We point back to the anchor */
	malAnchor.malFwd->malBwd = amem;		/* The old forward's back points to us */
	malAnchor.malFwd = amem;			/* Now the anchor points to us */

	mutex_unlock(malloc_lock);			/* Unlock now */

	return (void *)(rmem + 16);			/* Return the client block */

} /* malloc() */


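/*
 * Worked sketch of the rounding above (hypothetical helper): adding 15 and
 * masking with -16 (i.e. ~15) rounds an arbitrary kalloc() address up to the
 * next 16-byte boundary, and the extra 15 bytes folded into nsize guarantee
 * the rounded block still holds the header plus the client's request.
 */
#if 0
static void malloc_rounding_example(void)
{
	unsigned int raw = 0x00012345;			/* Hypothetical kalloc() result */
	unsigned int rounded = (raw + 15) & -16;	/* 0x00012350, the next 16-byte boundary */

	assert(rounded >= raw);				/* Never rounds down */
	assert(rounded - raw <= 15);			/* Loses at most 15 bytes to padding */
	assert((rounded & 15) == 0);			/* Always 16-byte aligned */
}
#endif
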
/*********************************************************************
* free()
*
*********************************************************************/
__private_extern__
void free(void * address) {

	malloc_block *amem, *fore, *aft;

	if(!(unsigned int)address) return;	/* Leave if they try to free nothing */

	amem = (malloc_block *)((unsigned int)address - sizeof(malloc_block));	/* Point to the header */

	mutex_lock(malloc_lock);

	fore = amem->malFwd;			/* Get the guy in front */
	aft = amem->malBwd;			/* And the guy behind */
	fore->malBwd = aft;			/* The next guy's previous is now my previous */
	aft->malFwd = fore;			/* The previous guy's forward is now mine */

	mutex_unlock(malloc_lock);		/* Unlock now */

	kfree(amem->malActl, amem->malSize);	/* Toss it */

	return;

} /* free() */

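/*
 * Illustrative sketch (hypothetical helper): every live allocation is linked
 * off malAnchor, so a walk of the list visits exactly the blocks that have
 * been malloc()ed but not yet free()d. This is the invariant malloc_reset()
 * relies on below.
 */
#if 0
static unsigned int malloc_live_block_count(void)
{
	malloc_block *amem;
	unsigned int count = 0;

	mutex_lock(malloc_lock);
	for(amem = malAnchor.malFwd; amem != &malAnchor; amem = amem->malFwd) {
		count++;				/* One live allocation */
	}
	mutex_unlock(malloc_lock);

	return count;
}
#endif
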
/*********************************************************************
* malloc_init()
*
* Allocates the mutual exclusion lock that protects malloc's data.
*********************************************************************/
__private_extern__ void
malloc_init(void)
{
	malloc_lock = mutex_alloc(0);
	malInited = 1;
}


/*********************************************************************
* malloc_reset()
*
* Walks the list of allocated blocks, releasing them all. Any
* subsequent access by clients to allocated data will cause a fault,
* as will any further use of this allocator.
*********************************************************************/
__private_extern__
void malloc_reset(void) {

	malloc_block *amem, *bmem;

	mutex_lock(malloc_lock);

	amem = malAnchor.malFwd;			/* Get the first one */

	while(amem != &malAnchor) {			/* Go until we hit the anchor */

		bmem = amem->malFwd;			/* Remember the next one */
		kfree(amem->malActl, amem->malSize);	/* Toss it */
		amem = bmem;				/* Skip to the next one */

	}

	malAnchor.malFwd = (struct malloc_block *) 0x666;	/* Cause a fault if we try again */
	malAnchor.malBwd = (struct malloc_block *) 0x666;	/* Cause a fault if we try again */

	mutex_unlock(malloc_lock);			/* Unlock now */

	mutex_free(malloc_lock);

#ifdef MALLOC_RESET_GC
	/* Force garbage collection of zones, since we've thrashed through a lot of memory */
	zone_gc();
#endif

	return;

} /* malloc_reset() */


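/*
 * Illustrative usage sketch (hypothetical caller): the kld linker allocates
 * freely during a link and then discards the whole arena in one shot rather
 * than freeing block by block. After malloc_reset() the allocator must not
 * be used again; the poisoned anchor above turns any attempt into a fault.
 */
#if 0
static void kld_link_session_example(void)
{
	void *scratch;

	malloc_init();					/* Set up the lock once */

	scratch = malloc(4096);				/* Allocate as needed during the link... */
	(void)scratch;					/* ...no individual free is required */

	malloc_reset();					/* Discard every live block at once */
}
#endif
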
/*********************************************************************
* realloc()
*
* This function simply allocates a new block and copies the existing
* data into it. Nothing too clever here, as cleanup and efficient
* memory usage are not important in this allocator package.
*********************************************************************/
__private_extern__
void * realloc(void * address, size_t new_client_size) {
	void * new_address;
	malloc_block *amem;
	unsigned int copy_size;

	amem = (malloc_block *)((unsigned int)address - sizeof(malloc_block));	/* Point to allocation block */

	new_address = malloc(new_client_size);		/* Get a new one */
	if(!new_address) {				/* Did we get it? */
		panic("realloc: cannot reallocate one of %08X size\n", new_client_size);
	}

	copy_size = amem->malSize - sizeof(malloc_block);		/* Old client area, including alignment padding */
	if(copy_size > new_client_size) copy_size = new_client_size;	/* Don't overrun a smaller new block */
	memcpy(new_address, address, copy_size);	/* Copy the old data in */

	free(address);					/* Toss the old one */

	return new_address;

} /* realloc() */

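/*
 * Illustrative sketch (hypothetical helper): growing a buffer with realloc().
 * The old contents are copied into the new block, the old block is always
 * released, and callers must switch to the returned pointer.
 */
#if 0
static void realloc_grow_example(void)
{
	char *buf = (char *)malloc(64);

	memset(buf, 'A', 64);				/* Fill the original block */
	buf = (char *)realloc(buf, 256);		/* Grow it; the old data moves with it */
	assert(buf[63] == 'A');				/* The first 64 bytes survived the move */

	free(buf);
}
#endif
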
#ifdef MALLOC_KLD_VM_ALLOCATE
#undef vm_allocate
#undef vm_deallocate

/*
 * Wrap vm_allocate calls made by kld in malloc/free so that the memory
 * is all released when we jettison kld. Make the other VM calls used by
 * kld no-ops, since we don't need them.
 */
__private_extern__
kern_return_t vm_allocate(vm_map_t target_task, vm_address_t *address, vm_size_t size, int flags)
{
	assert(flags & VM_FLAGS_ANYWHERE);
	assert(target_task == kernel_map);

	*address = (vm_address_t)malloc(size);
	bzero((void *)*address, size);		/* vm_allocate() memory is expected to be zero-filled */

	return KERN_SUCCESS;
}

__private_extern__
kern_return_t vm_deallocate(vm_map_t target_task, vm_address_t address, vm_size_t size)
{
	free((void *)address);
	return KERN_SUCCESS;
}

__private_extern__
kern_return_t vm_protect(vm_map_t target_task, vm_address_t address, vm_size_t size, boolean_t set_maximum, vm_prot_t new_protection)
{
	return KERN_SUCCESS;
}

__private_extern__
kern_return_t vm_msync(vm_map_t target_task, vm_address_t address, vm_size_t size, vm_sync_t sync_flags)
{
	return KERN_SUCCESS;
}
#endif
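
/*
 * Illustrative sketch (hypothetical caller, only meaningful when
 * MALLOC_KLD_VM_ALLOCATE is defined): memory that kld obtains through
 * vm_allocate() actually comes from this allocator, so it is tracked on
 * malAnchor and swept away by malloc_reset() when kld is jettisoned, even
 * if kld never calls vm_deallocate().
 */
#if 0
static void kld_vm_allocate_example(void)
{
	vm_address_t addr = 0;

	if(vm_allocate(kernel_map, &addr, PAGE_SIZE, VM_FLAGS_ANYWHERE) == KERN_SUCCESS) {
		/* addr points at zero-filled memory obtained from malloc() */
		vm_deallocate(kernel_map, addr, PAGE_SIZE);	/* Or simply leave it for malloc_reset() */
	}
}
#endif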