/*
 * Source: apple/libplatform (libplatform-126.50.8), src/os/alloc_once.c
 */
/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29#include "os/internal.h"
30#include "os/alloc_once_impl.h"
31#include <mach/mach_init.h>
32#include <mach/mach_vm.h>
33#include <mach/vm_statistics.h>
34
#pragma mark -
#pragma mark os_alloc

/*
 * Header stored at the base of every heap block: a running count of bytes
 * handed out from the block, plus a link to the previously current block
 * (the link is kept for debugging only — nothing is ever freed).
 */
typedef struct _os_alloc_heap_metadata_s {
	size_t allocated_bytes;
	void *prev;
} _os_alloc_heap_metadata_s;

/* Each heap block is two VM pages; the usable area excludes the header. */
#define allocation_size (2 * vm_page_size)
#define usable (allocation_size-sizeof(_os_alloc_heap_metadata_s))

/* Newest available heap block, or NULL before the first allocation. */
static void * volatile _os_alloc_heap;
46
/*
 * Simple allocator that doesn't have to worry about ever freeing allocations.
 *
 * _os_alloc_heap always points to the newest available heap block, or NULL if
 * this is the first allocation. Each heap block has a small header at its
 * base, recording the currently allocated bytes and the pointer to the
 * previous heap block.
 *
 * Ignoring the special case where the heap pointer is NULL (in which case we
 * always make a block): the allocator first atomically increments the
 * allocated_bytes counter by sz and calculates the eventual base pointer. If
 * base+sz is greater than allocation_size then we begin allocating a new
 * block. Otherwise, base is returned.
 *
 * Block allocation maps a new region of allocation_size and then attempts to
 * atomically cmpxchg that pointer with the current heap pointer. If
 * successful, it links the previous block to the new heap block for debugging
 * purposes and then reattempts allocation. If a thread loses the allocation
 * race, it vm_deallocates the still-clean region and reattempts the whole
 * allocation.
 */
67
68static inline void*
69_os_alloc_alloc(void *heap, size_t sz)
70{
71 if (likely(heap)) {
72 _os_alloc_heap_metadata_s *metadata = (_os_alloc_heap_metadata_s*)heap;
73 size_t used = os_atomic_add(&metadata->allocated_bytes, sz, relaxed);
74 if (likely(used <= usable)) {
75 return ((char*)metadata + sizeof(_os_alloc_heap_metadata_s) +
76 used - sz);
77 }
78 }
79 /* This fall-through case is heap == NULL, or heap block is exhausted. */
80 return NULL;
81}
82
/*
 * Slow path: map a fresh heap block, race to publish it, and retry the
 * allocation until it succeeds. Crashes the process if the VM map fails.
 */
OS_NOINLINE
static void*
_os_alloc_slow(void *heap, size_t sz)
{
	void *ptr;
	do {
		/*
		 * <rdar://problem/13208498> We allocate at PAGE_SIZE or above to ensure
		 * we don't land in the zero page *if* a binary has opted not to include
		 * the __PAGEZERO load command.
		 */
		mach_vm_address_t heapblk = PAGE_SIZE;
		kern_return_t kr;
		kr = mach_vm_map(mach_task_self(), &heapblk, allocation_size,
				0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_OS_ALLOC_ONCE),
				MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
				VM_INHERIT_DEFAULT);
		if (unlikely(kr)) {
			__LIBPLATFORM_INTERNAL_CRASH__(kr, "Failed to allocate in os_alloc_once");
		}
		/* Publish the fresh block only if no other thread beat us to it. */
		if (os_atomic_cmpxchg(&_os_alloc_heap, heap, (void*)heapblk, relaxed)) {
			/* Won the race: link the old block in for debugging. */
			((_os_alloc_heap_metadata_s*)heapblk)->prev = heap;
			heap = (void*)heapblk;
		} else {
			/* Lost the race: our region is still untouched, so release it
			 * and adopt the winner's block. */
			mach_vm_deallocate(mach_task_self(), heapblk, allocation_size);
			heap = _os_alloc_heap;
		}
		ptr = _os_alloc_alloc(heap, sz);
		/* Loop: the winner's block may itself fill up before we allocate. */
	} while (unlikely(!ptr));
	return ptr;
}
114
115static inline void*
116_os_alloc2(size_t sz)
117{
118 void *heap, *ptr;
119 if (unlikely(!sz || sz > usable)) {
120 __LIBPLATFORM_CLIENT_CRASH__(sz, "Requested allocation size is invalid");
121 }
122 heap = _os_alloc_heap;
123 if (likely(ptr = _os_alloc_alloc(heap, sz))) {
124 return ptr;
125 }
126 return _os_alloc_slow(heap, sz);
127}
128
129#pragma mark -
130#pragma mark os_alloc_once
131
/*
 * Context passed through _os_once to the allocation callback: the caller's
 * slot, the requested size, and an optional post-allocation initializer.
 */
typedef struct _os_alloc_once_ctxt_s {
	struct _os_alloc_once_s *slot;
	size_t sz;
	os_function_t init;
} _os_alloc_once_ctxt_s;
137
138static void
139_os_alloc(void *ctxt)
140{
141 _os_alloc_once_ctxt_s *c = ctxt;
142 c->slot->ptr = _os_alloc2((c->sz + 0xf) & ~0xfu);
143 if (c->init) {
144 c->init(c->slot->ptr);
145 }
146}
147
148void*
149_os_alloc_once(struct _os_alloc_once_s *slot, size_t sz, os_function_t init)
150{
151 _os_alloc_once_ctxt_s c = {
152 .slot = slot,
153 .sz = sz,
154 .init = init,
155 };
156 _os_once(&slot->once, &c, _os_alloc);
157 return slot->ptr;
158}