/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <libkern/libkern.h>

ZONE_VIEW_DEFINE(ZV_NAMEI, "vfs.namei", KHEAP_ID_DATA_BUFFERS, MAXPATHLEN);
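
/*
 * Illustrative sketch (not part of the original file): ZV_NAMEI is a zone
 * view sized for MAXPATHLEN buffers in the data-buffers heap.  A VFS caller
 * that needs a pathname buffer would typically allocate and release it
 * roughly like this (the variable name is hypothetical):
 *
 *	char *path = zalloc(ZV_NAMEI);
 *	...
 *	zfree(ZV_NAMEI, path);
 */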

static void *
__MALLOC_ext(
	size_t                size,
	int                   type,
	int                   flags,
	vm_allocation_site_t *site,
	kalloc_heap_t         heap)
{
	void *addr = NULL;

	if (type >= M_LAST) {
		panic("_malloc TYPE");
	}

	if (size == 0) {
		return NULL;
	}

	static_assert(sizeof(vm_size_t) == sizeof(size_t));
	static_assert(M_WAITOK == Z_WAITOK);
	static_assert(M_NOWAIT == Z_NOWAIT);
	static_assert(M_ZERO == Z_ZERO);

	addr = kalloc_ext(heap, size,
	    flags & (M_WAITOK | M_NOWAIT | M_ZERO), site).addr;
	if (__probable(addr)) {
		return addr;
	}

	if (flags & (M_NOWAIT | M_NULL)) {
		return NULL;
	}

	/*
	 * We get here when the caller told us to block waiting for memory, but
	 * kalloc said there's no memory left to get.  Generally, this means there's a
	 * leak or the caller asked for an impossibly large amount of memory.  If the caller
	 * is expecting a NULL return code then it should explicitly set the flag M_NULL.
	 * If the caller isn't expecting a NULL return code, we just panic.  This is less
	 * than ideal, but returning NULL when the caller isn't expecting it doesn't help
	 * since the majority of callers don't check the return value and will just
	 * dereference the pointer and trap anyway.  We may as well get a more
	 * descriptive message out while we can.
	 */
	panic("_MALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size);
}
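
/*
 * Illustrative sketch (not part of the original file): a caller that would
 * rather handle a NULL return than panic when the heap is exhausted passes
 * M_NULL along with its other flags.  The _MALLOC wrapper, the M_TEMP type
 * and the variable names below are only examples:
 *
 *	void *buf = _MALLOC(len, M_TEMP, M_WAITOK | M_ZERO | M_NULL);
 *	if (buf == NULL) {
 *		return ENOMEM;
 *	}
 */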

void *
__MALLOC(size_t size, int type, int flags, vm_allocation_site_t *site)
{
	return __MALLOC_ext(size, type, flags, site, KHEAP_DEFAULT);
}

void *
__REALLOC(
	void                 *addr,
	size_t                size,
	int                   type __unused,
	int                   flags,
	vm_allocation_site_t *site)
{
	addr = kheap_realloc_addr(KHEAP_DEFAULT, addr, size,
	    flags & (M_WAITOK | M_NOWAIT | M_ZERO), site).addr;

	if (__probable(addr)) {
		return addr;
	}

	if (flags & (M_NOWAIT | M_NULL)) {
		return NULL;
	}

	panic("_REALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size);
}

void *
_MALLOC_external(size_t size, int type, int flags);
void *
_MALLOC_external(size_t size, int type, int flags)
{
	static vm_allocation_site_t site = {
		.tag = VM_KERN_MEMORY_KALLOC,
		.flags = VM_TAG_BT,
	};
	return __MALLOC_ext(size, type, flags, &site, KHEAP_KEXT);
}

void
_FREE_external(void *addr, int type);
void
_FREE_external(void *addr, int type __unused)
{
	/*
	 * hashinit and other functions allocate on behalf of kexts and do not have
	 * a matching hashdestroy, so we sadly have to allow this for now.
	 */
	kheap_free_addr(KHEAP_ANY, addr);
}

void
_FREE_ZONE_external(void *elem, size_t size, int type);
void
_FREE_ZONE_external(void *elem, size_t size, int type __unused)
{
	(kheap_free)(KHEAP_KEXT, elem, size);
}

#if DEBUG || DEVELOPMENT

extern unsigned int zone_map_jetsam_limit;

static int
sysctl_zone_map_jetsam_limit SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int oldval = 0, val = 0, error = 0;

	oldval = zone_map_jetsam_limit;
	error = sysctl_io_number(req, oldval, sizeof(int), &val, NULL);
	if (error || !req->newptr) {
		return error;
	}

	if (val <= 0 || val > 100) {
		printf("sysctl_zone_map_jetsam_limit: new jetsam limit value is invalid.\n");
		return EINVAL;
	}

	zone_map_jetsam_limit = val;
	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, zone_map_jetsam_limit, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    sysctl_zone_map_jetsam_limit, "I", "Zone map jetsam limit");
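
/*
 * Illustrative usage (not part of the original file, DEBUG/DEVELOPMENT
 * kernels only): the handler above accepts values from 1 to 100, presumably
 * a percentage of the zone map, and can be read or tuned from user space:
 *
 *	sysctl kern.zone_map_jetsam_limit	# read the current limit
 *	sysctl kern.zone_map_jetsam_limit=95	# set the limit to 95
 */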


extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity);

static int
sysctl_zone_map_size_and_capacity SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t zstats[2];
	get_zone_map_size(&zstats[0], &zstats[1]);

	return SYSCTL_OUT(req, &zstats, sizeof(zstats));
}

SYSCTL_PROC(_kern, OID_AUTO, zone_map_size_and_capacity,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, &sysctl_zone_map_size_and_capacity, "Q", "Current size and capacity of the zone map");
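
/*
 * Illustrative sketch (user space, not part of the original file): the
 * handler above copies out two 64-bit values, the current zone map size
 * followed by its capacity.  Variable names are hypothetical:
 *
 *	uint64_t zstats[2];
 *	size_t len = sizeof(zstats);
 *	if (sysctlbyname("kern.zone_map_size_and_capacity", zstats, &len, NULL, 0) == 0) {
 *		printf("zone map: %llu of %llu bytes used\n", zstats[0], zstats[1]);
 *	}
 */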


extern boolean_t run_zone_test(void);

static int
sysctl_run_zone_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	/* require setting this sysctl to prevent sysctl -a from running this */
	if (!req->newptr) {
		return 0;
	}

	int ret_val = run_zone_test();
	return SYSCTL_OUT(req, &ret_val, sizeof(ret_val));
}

SYSCTL_PROC(_kern, OID_AUTO, run_zone_test,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, &sysctl_run_zone_test, "I", "Test zone allocator KPI");
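
/*
 * Illustrative usage (not part of the original file): the node is write-only
 * and guarded above so that `sysctl -a` does not trigger it; the test runs
 * only when a value is written, e.g.:
 *
 *	sysctl kern.run_zone_test=1
 */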

#endif /* DEBUG || DEVELOPMENT */

#if CONFIG_ZLEAKS

SYSCTL_DECL(_kern_zleak);
SYSCTL_NODE(_kern, OID_AUTO, zleak, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "zleak");

/*
 * kern.zleak.active
 *
 * Show the status of the zleak subsystem (0 = enabled, 1 = active,
 * and -1 = failed), and if enabled, allow it to be activated immediately.
 */
static int
sysctl_zleak_active SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int oldval, val, error;

	val = oldval = get_zleak_state();
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		return error;
	}
	/*
	 * Can only be activated if it's off (and not failed.)
	 * Cannot be deactivated once it's on.
	 */
	if (val == 1 && oldval == 0) {
		kern_return_t kr = zleak_activate();

		if (KERN_SUCCESS != kr) {
			printf("zleak_active: failed to activate "
			    "live zone leak debugging (%d).\n", kr);
		}
	}
	if (val == 0 && oldval == 1) {
		printf("zleak_active: active, cannot be disabled.\n");
		return EINVAL;
	}
	return 0;
}

SYSCTL_PROC(_kern_zleak, OID_AUTO, active,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_zleak_active, "I", "zleak activity");
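
/*
 * Illustrative usage (not part of the original file): if zleak is enabled
 * but not yet active it can be activated immediately; the handler above
 * refuses to turn it off again once active.
 *
 *	sysctl kern.zleak.active	# -1 = failed, 0 = enabled, 1 = active
 *	sysctl kern.zleak.active=1	# activate live zone leak tracking
 */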

/*
 * kern.zleak.max_zonemap_size
 *
 * Read the value of the maximum zonemap size in bytes; useful
 * as the maximum size that zleak.global_threshold and
 * zleak.zone_threshold should be set to.
 */
static int
sysctl_zleak_max_zonemap_size SYSCTL_HANDLER_ARGS
{
	uint64_t zmap_max_size = *(vm_size_t *)arg1;

	return sysctl_handle_quad(oidp, &zmap_max_size, arg2, req);
}

SYSCTL_PROC(_kern_zleak, OID_AUTO, max_zonemap_size,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
    &zleak_max_zonemap_size, 0,
    sysctl_zleak_max_zonemap_size, "Q", "zleak max zonemap size");


static int
sysctl_zleak_threshold SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int error;
	uint64_t value = *(vm_size_t *)arg1;

	error = sysctl_io_number(req, value, sizeof(value), &value, NULL);

	if (error || !req->newptr) {
		return error;
	}

	if (value > (uint64_t)zleak_max_zonemap_size) {
		return ERANGE;
	}

	*(vm_size_t *)arg1 = value;
	return 0;
}

/*
 * kern.zleak.global_threshold
 *
 * Set the global zleak threshold size (in bytes).  If the zone map
 * grows larger than this value, zleaks are automatically activated.
 *
 * The default value is set in zleak_init().
 */
SYSCTL_PROC(_kern_zleak, OID_AUTO, global_threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &zleak_global_tracking_threshold, 0,
    sysctl_zleak_threshold, "Q", "zleak global threshold");

/*
 * kern.zleak.zone_threshold
 *
 * Set the per-zone threshold size (in bytes) above which any
 * zone will automatically start zleak tracking.
 *
 * The default value is set in zleak_init().
 *
 * Setting this variable will have no effect until zleak tracking is
 * activated (See above.)
 */
SYSCTL_PROC(_kern_zleak, OID_AUTO, zone_threshold,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &zleak_per_zone_tracking_threshold, 0,
    sysctl_zleak_threshold, "Q", "zleak per-zone threshold");
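
/*
 * Illustrative usage (not part of the original file, values hypothetical):
 * read the ceiling first, then set either threshold at or below it; the
 * handler above returns ERANGE for anything larger.
 *
 *	sysctl kern.zleak.max_zonemap_size		# upper bound for both thresholds
 *	sysctl kern.zleak.global_threshold=1073741824	# auto-activate past 1 GiB
 *	sysctl kern.zleak.zone_threshold=134217728	# track zones past 128 MiB
 */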

#endif /* CONFIG_ZLEAKS */

extern uint64_t get_zones_collectable_bytes(void);

static int
sysctl_zones_collectable_bytes SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	uint64_t zones_free_mem = get_zones_collectable_bytes();

	return SYSCTL_OUT(req, &zones_free_mem, sizeof(zones_free_mem));
}

SYSCTL_PROC(_kern, OID_AUTO, zones_collectable_bytes,
    CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED,
    0, 0, &sysctl_zones_collectable_bytes, "Q", "Collectable memory in zones");
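
/*
 * Illustrative usage (not part of the original file): reports a single
 * 64-bit byte count of collectable (reclaimable) zone memory, e.g.:
 *
 *	sysctl kern.zones_collectable_bytes
 */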


#if DEBUG || DEVELOPMENT

static int
sysctl_zone_gc_replenish_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	/* require setting this sysctl to prevent sysctl -a from running this */
	if (!req->newptr) {
		return 0;
	}

	int ret_val = 0;
	zone_gc_replenish_test();
	return SYSCTL_OUT(req, &ret_val, sizeof(ret_val));
}

static int
sysctl_zone_alloc_replenish_test SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	/* require setting this sysctl to prevent sysctl -a from running this */
	if (!req->newptr) {
		return 0;
	}

	int ret_val = 0;
	zone_alloc_replenish_test();
	return SYSCTL_OUT(req, &ret_val, sizeof(ret_val));
}

SYSCTL_PROC(_kern, OID_AUTO, zone_gc_replenish_test,
    CTLTYPE_INT | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLFLAG_WR,
    0, 0, &sysctl_zone_gc_replenish_test, "I", "Test zone GC replenish");
SYSCTL_PROC(_kern, OID_AUTO, zone_alloc_replenish_test,
    CTLTYPE_INT | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLFLAG_WR,
    0, 0, &sysctl_zone_alloc_replenish_test, "I", "Test zone alloc replenish");
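
/*
 * Illustrative usage (not part of the original file): like run_zone_test,
 * both nodes are write-only and fire only when a value is written:
 *
 *	sysctl kern.zone_gc_replenish_test=1
 *	sysctl kern.zone_alloc_replenish_test=1
 */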

#endif /* DEBUG || DEVELOPMENT */