/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/queue.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <kern/assert.h>
#include <kern/zalloc.h>
#include <kern/debug.h>

#include <vm/vm_kern.h>

#include "libsa/malloc.h"
/*********************************************************************
* Structure for a client memory block. Contains linked-list pointers,
* a size field giving the TOTAL size of the block, including this
* header, and the address of the client's block. The client block
* field is guaranteed to lie on a 16-byte boundary.
*
* NOTE(review): the extracted source was truncated after malBwd; the
* malActl and malSize fields are reconstructed from their uses in
* malloc()/free() and from malAnchor's four-element initializer.
*********************************************************************/
typedef struct malloc_block {
	struct malloc_block	*malFwd;	/* Next block on the allocation chain */
	struct malloc_block	*malBwd;	/* Previous block on the allocation chain */
	void			*malActl;	/* Actual (unrounded) address returned by kalloc() */
	unsigned int		malSize;	/* TOTAL allocation size, header included */
} malloc_block;
58 static malloc_block malAnchor
= {&malAnchor
, &malAnchor
, NULL
, 0};
60 static int malInited
= 0;
61 static mutex_t
*malloc_lock
;
64 void * malloc(size_t size
) {
67 unsigned int nmem
, rmem
;
72 nsize
= size
+ sizeof(malloc_block
) + 15; /* Make sure we get enough to fit */
74 nmem
= (unsigned int)kalloc(nsize
); /* Get some */
75 if(!nmem
) { /* Got any? */
76 panic("malloc: no memory for a %08X sized request\n", nsize
);
79 rmem
= (nmem
+ 15) & -16; /* Round to 16 byte boundary */
80 amem
= (malloc_block
*)rmem
; /* Point to the block */
81 amem
->malActl
= (void *)nmem
; /* Set the actual address */
82 amem
->malSize
= nsize
; /* Size */
84 mutex_lock(malloc_lock
);
86 amem
->malFwd
= malAnchor
.malFwd
; /* Move anchor to our forward */
87 amem
->malBwd
= &malAnchor
; /* We point back to anchor */
88 malAnchor
.malFwd
->malBwd
= amem
; /* The old forward's back points to us */
89 malAnchor
.malFwd
= amem
; /* Now we point the anchor to us */
91 mutex_unlock(malloc_lock
); /* Unlock now */
93 return (void *)(rmem
+ 16); /* Return the block */
98 /*********************************************************************
101 *********************************************************************/
103 void free(void * address
) {
106 malloc_block
*amem
, *fore
, *aft
;
108 if(!(unsigned int)address
) return; /* Leave if they try to free nothing */
111 amem
= (malloc_block
*)((unsigned int)address
- sizeof(malloc_block
)); /* Point to the header */
113 mutex_lock(malloc_lock
);
115 fore
= amem
->malFwd
; /* Get the guy in front */
116 aft
= amem
->malBwd
; /* And the guy behind */
117 fore
->malBwd
= aft
; /* The next guy's previous is now my previous */
118 aft
->malFwd
= fore
; /* The previous guy's forward is now mine */
120 mutex_unlock(malloc_lock
); /* Unlock now */
122 kfree(amem
->malActl
, amem
->malSize
); /* Toss it */
128 /*********************************************************************
131 * Allocate the mutual exclusion lock that protect malloc's data.
132 *********************************************************************/
133 __private_extern__
void
136 malloc_lock
= mutex_alloc(0);
141 /*********************************************************************
144 * Walks through the list of VM-allocated regions, destroying them
145 * all. Any subsequent access by clients to allocated data will cause
146 * a segmentation fault.
147 *********************************************************************/
149 void malloc_reset(void) {
151 malloc_block
*amem
, *bmem
;
153 mutex_lock(malloc_lock
);
155 amem
= malAnchor
.malFwd
; /* Get the first one */
157 while(amem
!= &malAnchor
) { /* Go until we hit the anchor */
159 bmem
= amem
->malFwd
; /* Next one */
160 kfree(amem
->malActl
, amem
->malSize
); /* Toss it */
161 amem
= bmem
; /* Skip to it */
165 malAnchor
.malFwd
= (struct malloc_block
*) 0x666; /* Cause a fault if we try again */
166 malAnchor
.malBwd
= (struct malloc_block
*) 0x666; /* Cause a fault if we try again */
168 mutex_unlock(malloc_lock
); /* Unlock now */
170 mutex_free(malloc_lock
);
172 #ifdef MALLOC_RESET_GC
173 /* Force garbage collection of zones, since we've thrashed through a lot of memory */
179 } /* malloc_reset() */
182 /*********************************************************************
185 * This function simply allocates a new block and copies the existing
186 * data into it. Nothing too clever here, as cleanup and efficient
187 * memory usage are not important in this allocator package.
188 *********************************************************************/
190 void * realloc(void * address
, size_t new_client_size
) {
194 amem
= (malloc_block
*)((unsigned int)address
- sizeof(malloc_block
)); /* Point to allocation block */
196 new_address
= malloc(new_client_size
); /* get a new one */
197 if(!new_address
) { /* Did we get it? */
198 panic("realloc: can not reallocate one of %08X size\n", new_client_size
);
201 memcpy(new_address
, address
, amem
->malSize
- sizeof(malloc_block
)); /* Copy the old in */
203 free(address
); /* Toss the old one */
209 #ifdef MALLOC_KLD_VM_ALLOCATE
214 * Wrap vm_allocate calls made by kld in malloc/free so that the memory
215 * is all released when we jettison kld. Make other VM calls used by kld
216 * no-op, since we don't need them.
219 kern_return_t
vm_allocate(vm_map_t target_task
, vm_address_t
*address
, vm_size_t size
, int flags
)
221 assert(flags
& VM_FLAGS_ANYWHERE
);
222 assert(target_task
== kernel_map
);
224 *address
= (vm_address_t
)malloc(size
);
225 bzero(*address
, size
);
231 kern_return_t
vm_deallocate(vm_map_t target_task
, vm_address_t address
, vm_size_t size
)
238 kern_return_t
vm_protect(vm_map_t target_task
, vm_address_t address
, vm_size_t size
, boolean_t set_maximum
, vm_prot_t new_protection
)
244 kern_return_t
vm_msync(vm_map_t target_task
, vm_address_t address
, vm_size_t size
, vm_sync_t sync_flags
)