/*
 * util/alloc.c - memory allocation service.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 * This file contains memory allocation functions.
 */
#include "config.h"
#include "util/alloc.h"
#include "util/regional.h"
#include "util/data/packed_rrset.h"
#include "util/fptr_wlist.h"
/** custom size of cached regional blocks */
#define ALLOC_REG_SIZE	16384
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT	48	/* for 65k threads, 2^48 rrsets per thr. */
53 /** setup new special type */
55 alloc_setup_special(alloc_special_t
* t
)
57 memset(t
, 0, sizeof(*t
));
58 lock_rw_init(&t
->entry
.lock
);
62 /** prealloc some entries in the cache. To minimize contention.
63 * Result is 1 lock per alloc_max newly created entries.
64 * @param alloc: the structure to fill up.
67 prealloc(struct alloc_cache
* alloc
)
71 for(i
=0; i
<ALLOC_SPECIAL_MAX
; i
++) {
72 if(!(p
= (alloc_special_t
*)malloc(sizeof(alloc_special_t
)))) {
73 log_err("prealloc: out of memory");
76 alloc_setup_special(p
);
77 alloc_set_special_next(p
, alloc
->quar
);
83 /** prealloc region blocks */
85 prealloc_blocks(struct alloc_cache
* alloc
, size_t num
)
89 for(i
=0; i
<num
; i
++) {
90 r
= regional_create_custom(ALLOC_REG_SIZE
);
92 log_err("prealloc blocks: out of memory");
95 r
->next
= (char*)alloc
->reg_list
;
97 alloc
->num_reg_blocks
++;
102 alloc_init(struct alloc_cache
* alloc
, struct alloc_cache
* super
,
105 memset(alloc
, 0, sizeof(*alloc
));
106 alloc
->super
= super
;
107 alloc
->thread_num
= thread_num
;
108 alloc
->next_id
= (uint64_t)thread_num
; /* in steps, so that type */
109 alloc
->next_id
<<= THRNUM_SHIFT
; /* of *_id is used. */
110 alloc
->last_id
= 1; /* so no 64bit constants, */
111 alloc
->last_id
<<= THRNUM_SHIFT
; /* or implicit 'int' ops. */
112 alloc
->last_id
-= 1; /* for compiler portability. */
113 alloc
->last_id
|= alloc
->next_id
;
114 alloc
->next_id
+= 1; /* because id=0 is special. */
115 alloc
->max_reg_blocks
= 100;
116 alloc
->num_reg_blocks
= 0;
117 alloc
->reg_list
= NULL
;
118 alloc
->cleanup
= NULL
;
119 alloc
->cleanup_arg
= NULL
;
121 prealloc_blocks(alloc
, alloc
->max_reg_blocks
);
123 lock_quick_init(&alloc
->lock
);
124 lock_protect(&alloc
->lock
, alloc
, sizeof(*alloc
));
129 alloc_clear(struct alloc_cache
* alloc
)
131 alloc_special_t
* p
, *np
;
132 struct regional
* r
, *nr
;
136 lock_quick_destroy(&alloc
->lock
);
138 if(alloc
->super
&& alloc
->quar
) {
139 /* push entire list into super */
141 while(alloc_special_next(p
)) /* find last */
142 p
= alloc_special_next(p
);
143 lock_quick_lock(&alloc
->super
->lock
);
144 alloc_set_special_next(p
, alloc
->super
->quar
);
145 alloc
->super
->quar
= alloc
->quar
;
146 alloc
->super
->num_quar
+= alloc
->num_quar
;
147 lock_quick_unlock(&alloc
->super
->lock
);
152 np
= alloc_special_next(p
);
153 /* deinit special type */
154 lock_rw_destroy(&p
->entry
.lock
);
163 nr
= (struct regional
*)r
->next
;
167 alloc
->reg_list
= NULL
;
168 alloc
->num_reg_blocks
= 0;
172 alloc_get_id(struct alloc_cache
* alloc
)
174 uint64_t id
= alloc
->next_id
++;
175 if(id
== alloc
->last_id
) {
176 log_warn("rrset alloc: out of 64bit ids. Clearing cache.");
177 fptr_ok(fptr_whitelist_alloc_cleanup(alloc
->cleanup
));
178 (*alloc
->cleanup
)(alloc
->cleanup_arg
);
180 /* start back at first number */ /* like in alloc_init*/
181 alloc
->next_id
= (uint64_t)alloc
->thread_num
;
182 alloc
->next_id
<<= THRNUM_SHIFT
; /* in steps for comp. */
183 alloc
->next_id
+= 1; /* portability. */
184 /* and generate new and safe id */
185 id
= alloc
->next_id
++;
191 alloc_special_obtain(struct alloc_cache
* alloc
)
195 /* see if in local cache */
198 alloc
->quar
= alloc_special_next(p
);
200 p
->id
= alloc_get_id(alloc
);
203 /* see if in global cache */
205 /* could maybe grab alloc_max/2 entries in one go,
206 * but really, isn't that just as fast as this code? */
207 lock_quick_lock(&alloc
->super
->lock
);
208 if((p
= alloc
->super
->quar
)) {
209 alloc
->super
->quar
= alloc_special_next(p
);
210 alloc
->super
->num_quar
--;
212 lock_quick_unlock(&alloc
->super
->lock
);
214 p
->id
= alloc_get_id(alloc
);
220 if(!(p
= (alloc_special_t
*)malloc(sizeof(alloc_special_t
)))) {
221 log_err("alloc_special_obtain: out of memory");
224 alloc_setup_special(p
);
225 p
->id
= alloc_get_id(alloc
);
229 /** push mem and some more items to the super */
231 pushintosuper(struct alloc_cache
* alloc
, alloc_special_t
* mem
)
234 alloc_special_t
*p
= alloc
->quar
;
236 log_assert(alloc
&& alloc
->super
&&
237 alloc
->num_quar
>= ALLOC_SPECIAL_MAX
);
238 /* push ALLOC_SPECIAL_MAX/2 after mem */
239 alloc_set_special_next(mem
, alloc
->quar
);
240 for(i
=1; i
<ALLOC_SPECIAL_MAX
/2; i
++) {
241 p
= alloc_special_next(p
);
243 alloc
->quar
= alloc_special_next(p
);
244 alloc
->num_quar
-= ALLOC_SPECIAL_MAX
/2;
246 /* dump mem+list into the super quar list */
247 lock_quick_lock(&alloc
->super
->lock
);
248 alloc_set_special_next(p
, alloc
->super
->quar
);
249 alloc
->super
->quar
= mem
;
250 alloc
->super
->num_quar
+= ALLOC_SPECIAL_MAX
/2 + 1;
251 lock_quick_unlock(&alloc
->super
->lock
);
252 /* so 1 lock per mem+alloc/2 deletes */
256 alloc_special_release(struct alloc_cache
* alloc
, alloc_special_t
* mem
)
262 lock_quick_lock(&alloc
->lock
); /* superalloc needs locking */
265 alloc_special_clean(mem
);
266 if(alloc
->super
&& alloc
->num_quar
>= ALLOC_SPECIAL_MAX
) {
267 /* push it to the super structure */
268 pushintosuper(alloc
, mem
);
272 alloc_set_special_next(mem
, alloc
->quar
);
276 lock_quick_unlock(&alloc
->lock
);
281 alloc_stats(struct alloc_cache
* alloc
)
283 log_info("%salloc: %d in cache, %d blocks.", alloc
->super
?"":"sup",
284 (int)alloc
->num_quar
, (int)alloc
->num_reg_blocks
);
287 size_t alloc_get_mem(struct alloc_cache
* alloc
)
290 size_t s
= sizeof(*alloc
);
292 lock_quick_lock(&alloc
->lock
); /* superalloc needs locking */
294 s
+= sizeof(alloc_special_t
) * alloc
->num_quar
;
295 for(p
= alloc
->quar
; p
; p
= alloc_special_next(p
)) {
296 s
+= lock_get_mem(&p
->entry
.lock
);
298 s
+= alloc
->num_reg_blocks
* ALLOC_REG_SIZE
;
300 lock_quick_unlock(&alloc
->lock
);
306 alloc_reg_obtain(struct alloc_cache
* alloc
)
308 if(alloc
->num_reg_blocks
> 0) {
309 struct regional
* r
= alloc
->reg_list
;
310 alloc
->reg_list
= (struct regional
*)r
->next
;
312 alloc
->num_reg_blocks
--;
315 return regional_create_custom(ALLOC_REG_SIZE
);
319 alloc_reg_release(struct alloc_cache
* alloc
, struct regional
* r
)
321 if(alloc
->num_reg_blocks
>= alloc
->max_reg_blocks
) {
326 regional_free_all(r
);
327 log_assert(r
->next
== NULL
);
328 r
->next
= (char*)alloc
->reg_list
;
330 alloc
->num_reg_blocks
++;
334 alloc_set_id_cleanup(struct alloc_cache
* alloc
, void (*cleanup
)(void*),
337 alloc
->cleanup
= cleanup
;
338 alloc
->cleanup_arg
= arg
;
341 /** global debug value to keep track of total memory mallocs */
342 size_t unbound_mem_alloc
= 0;
343 /** global debug value to keep track of total memory frees */
344 size_t unbound_mem_freed
= 0;
345 #ifdef UNBOUND_ALLOC_STATS
346 /** special value to know if the memory is being tracked */
347 uint64_t mem_special
= (uint64_t)0xfeed43327766abcdLL
;
351 /** malloc with stats */
352 void *unbound_stat_malloc(size_t size
)
355 if(size
== 0) size
= 1;
356 res
= malloc(size
+16);
357 if(!res
) return NULL
;
358 unbound_mem_alloc
+= size
;
359 log_info("stat %p=malloc(%u)", res
+16, (unsigned)size
);
360 memcpy(res
, &size
, sizeof(size
));
361 memcpy(res
+8, &mem_special
, sizeof(mem_special
));
367 /** calloc with stats */
368 void *unbound_stat_calloc(size_t nmemb
, size_t size
)
370 size_t s
= (nmemb
*size
==0)?(size_t)1:nmemb
*size
;
371 void* res
= calloc(1, s
+16);
372 if(!res
) return NULL
;
373 log_info("stat %p=calloc(%u, %u)", res
+16, (unsigned)nmemb
, (unsigned)size
);
374 unbound_mem_alloc
+= s
;
375 memcpy(res
, &s
, sizeof(s
));
376 memcpy(res
+8, &mem_special
, sizeof(mem_special
));
382 /** free with stats */
383 void unbound_stat_free(void *ptr
)
387 if(memcmp(ptr
-8, &mem_special
, sizeof(mem_special
)) != 0) {
392 memcpy(&s
, ptr
, sizeof(s
));
393 log_info("stat free(%p) size %u", ptr
+16, (unsigned)s
);
395 unbound_mem_freed
+= s
;
401 /** realloc with stats */
402 void *unbound_stat_realloc(void *ptr
, size_t size
)
406 if(!ptr
) return unbound_stat_malloc(size
);
407 if(memcmp(ptr
-8, &mem_special
, sizeof(mem_special
)) != 0) {
408 return realloc(ptr
, size
);
411 unbound_stat_free(ptr
);
415 memcpy(&cursz
, ptr
, sizeof(cursz
));
417 /* nothing changes */
420 res
= malloc(size
+16);
421 if(!res
) return NULL
;
422 unbound_mem_alloc
+= size
;
423 unbound_mem_freed
+= cursz
;
424 log_info("stat realloc(%p, %u) from %u", ptr
+16, (unsigned)size
, (unsigned)cursz
);
426 memcpy(res
+16, ptr
+16, size
);
427 } else if(size
> cursz
) {
428 memcpy(res
+16, ptr
+16, cursz
);
432 memcpy(res
, &size
, sizeof(size
));
433 memcpy(res
+8, &mem_special
, sizeof(mem_special
));
/** log to file where alloc was done, then delegate to the stat malloc. */
void *unbound_stat_malloc_log(size_t size, const char* file, int line,
	const char* func)
{
	log_info("%s:%d %s malloc(%u)", file, line, func, (unsigned)size);
	return unbound_stat_malloc(size);
}
/** log to file where alloc was done, then delegate to the stat calloc. */
void *unbound_stat_calloc_log(size_t nmemb, size_t size, const char* file,
	int line, const char* func)
{
	log_info("%s:%d %s calloc(%u, %u)", file, line, func,
		(unsigned) nmemb, (unsigned)size);
	return unbound_stat_calloc(nmemb, size);
}
454 /** log to file where free was done */
455 void unbound_stat_free_log(void *ptr
, const char* file
, int line
,
458 if(ptr
&& memcmp(ptr
-8, &mem_special
, sizeof(mem_special
)) == 0) {
460 memcpy(&s
, ptr
-16, sizeof(s
));
461 log_info("%s:%d %s free(%p) size %u",
462 file
, line
, func
, ptr
, (unsigned)s
);
464 log_info("%s:%d %s unmatched free(%p)", file
, line
, func
, ptr
);
465 unbound_stat_free(ptr
);
468 /** log to file where alloc was done */
469 void *unbound_stat_realloc_log(void *ptr
, size_t size
, const char* file
,
470 int line
, const char* func
)
472 log_info("%s:%d %s realloc(%p, %u)", file
, line
, func
,
473 ptr
, (unsigned)size
);
474 return unbound_stat_realloc(ptr
, size
);
477 #endif /* UNBOUND_ALLOC_STATS */
478 #ifdef UNBOUND_ALLOC_LITE
/** length of the guard prefix and suffix */
static size_t lite_pad = 16;
/** prefix value to check (must be lite_pad bytes long) */
static char* lite_pre = "checkfront123456";
/** suffix value to check (must be lite_pad bytes long) */
static char* lite_post= "checkafter123456";
490 void *unbound_stat_malloc_lite(size_t size
, const char* file
, int line
,
493 /* [prefix .. len .. actual data .. suffix] */
494 void* res
= malloc(size
+lite_pad
*2+sizeof(size_t));
495 if(!res
) return NULL
;
496 memmove(res
, lite_pre
, lite_pad
);
497 memmove(res
+lite_pad
, &size
, sizeof(size_t));
498 memset(res
+lite_pad
+sizeof(size_t), 0x1a, size
); /* init the memory */
499 memmove(res
+lite_pad
+size
+sizeof(size_t), lite_post
, lite_pad
);
500 return res
+lite_pad
+sizeof(size_t);
503 void *unbound_stat_calloc_lite(size_t nmemb
, size_t size
, const char* file
,
504 int line
, const char* func
)
506 size_t req
= nmemb
* size
;
507 void* res
= malloc(req
+lite_pad
*2+sizeof(size_t));
508 if(!res
) return NULL
;
509 memmove(res
, lite_pre
, lite_pad
);
510 memmove(res
+lite_pad
, &req
, sizeof(size_t));
511 memset(res
+lite_pad
+sizeof(size_t), 0, req
);
512 memmove(res
+lite_pad
+req
+sizeof(size_t), lite_post
, lite_pad
);
513 return res
+lite_pad
+sizeof(size_t);
516 void unbound_stat_free_lite(void *ptr
, const char* file
, int line
,
522 real
= ptr
-lite_pad
-sizeof(size_t);
523 if(memcmp(real
, lite_pre
, lite_pad
) != 0) {
524 log_err("free(): prefix failed %s:%d %s", file
, line
, func
);
525 log_hex("prefix here", real
, lite_pad
);
526 log_hex(" should be", lite_pre
, lite_pad
);
527 fatal_exit("alloc assertion failed");
529 memmove(&orig
, real
+lite_pad
, sizeof(size_t));
530 if(memcmp(real
+lite_pad
+orig
+sizeof(size_t), lite_post
, lite_pad
)!=0){
531 log_err("free(): suffix failed %s:%d %s", file
, line
, func
);
532 log_err("alloc size is %d", (int)orig
);
533 log_hex("suffix here", real
+lite_pad
+orig
+sizeof(size_t),
535 log_hex(" should be", lite_post
, lite_pad
);
536 fatal_exit("alloc assertion failed");
538 memset(real
, 0xdd, orig
+lite_pad
*2+sizeof(size_t)); /* mark it */
542 void *unbound_stat_realloc_lite(void *ptr
, size_t size
, const char* file
,
543 int line
, const char* func
)
545 /* always free and realloc (no growing) */
550 return unbound_stat_malloc_lite(size
, file
, line
, func
);
554 unbound_stat_free_lite(ptr
, file
, line
, func
);
557 /* change allocation size and copy */
558 real
= ptr
-lite_pad
-sizeof(size_t);
559 if(memcmp(real
, lite_pre
, lite_pad
) != 0) {
560 log_err("realloc(): prefix failed %s:%d %s", file
, line
, func
);
561 log_hex("prefix here", real
, lite_pad
);
562 log_hex(" should be", lite_pre
, lite_pad
);
563 fatal_exit("alloc assertion failed");
565 memmove(&orig
, real
+lite_pad
, sizeof(size_t));
566 if(memcmp(real
+lite_pad
+orig
+sizeof(size_t), lite_post
, lite_pad
)!=0){
567 log_err("realloc(): suffix failed %s:%d %s", file
, line
, func
);
568 log_err("alloc size is %d", (int)orig
);
569 log_hex("suffix here", real
+lite_pad
+orig
+sizeof(size_t),
571 log_hex(" should be", lite_post
, lite_pad
);
572 fatal_exit("alloc assertion failed");
574 /* new alloc and copy over */
575 newa
= unbound_stat_malloc_lite(size
, file
, line
, func
);
579 memmove(newa
, ptr
, orig
);
580 else memmove(newa
, ptr
, size
);
581 memset(real
, 0xdd, orig
+lite_pad
*2+sizeof(size_t)); /* mark it */
/** strdup that goes through the guarded malloc_lite. */
char* unbound_strdup_lite(const char* s, const char* file, int line,
	const char* func)
{
	/* this routine is made to make sure strdup() uses the malloc_lite */
	size_t l = strlen(s)+1;
	char* n = (char*)unbound_stat_malloc_lite(l, file, line, func);
	if(!n) return NULL;
	memmove(n, s, l);
	return n;
}
/** wrap a malloc'ed string into a guarded copy; frees the original.
 * Ownership of s transfers to this function. */
char* unbound_lite_wrapstr(char* s)
{
	char* n = unbound_strdup_lite(s, __FILE__, __LINE__, __func__);
	free(s);
	return n;
}
604 #undef sldns_pkt2wire
605 sldns_status
unbound_lite_pkt2wire(uint8_t **dest
, const sldns_pkt
*p
,
610 sldns_status s
= sldns_pkt2wire(&md
, p
, &ms
);
612 *dest
= unbound_stat_malloc_lite(ms
, __FILE__
, __LINE__
,
615 if(!*dest
) { free(md
); return LDNS_STATUS_MEM_ERR
; }
616 memcpy(*dest
, md
, ms
);
626 int unbound_lite_i2d_DSA_SIG(DSA_SIG
* dsasig
, unsigned char** sig
)
628 unsigned char* n
= NULL
;
629 int r
= i2d_DSA_SIG(dsasig
, &n
);
631 *sig
= unbound_stat_malloc_lite((size_t)r
, __FILE__
, __LINE__
,
634 memcpy(*sig
, n
, (size_t)r
);
642 #endif /* UNBOUND_ALLOC_LITE */