/* src/os/alloc_once.c — Apple libplatform (libplatform-254.40.4) */
1/*
2 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include "os/internal.h"
#include "resolver.h"
31#include "os/alloc_once_impl.h"
32#include <mach/mach_init.h>
33#include <mach/mach_vm.h>
34#include <mach/vm_statistics.h>
35
36#pragma mark -
37#pragma mark os_alloc
38
/*
 * Header stored at the start of every heap block: a running count of
 * bytes handed out from the block, and a link to the previously active
 * block (the comment below notes the link is kept for debugging).
 */
typedef struct _os_alloc_heap_metadata_s {
	size_t allocated_bytes;
	void *prev;
} _os_alloc_heap_metadata_s;
43
/* Each heap block spans two VM pages; the usable payload is what remains
 * after the metadata header at the top of the block. */
#define allocation_size (2 * vm_page_size)
#define usable (allocation_size-sizeof(_os_alloc_heap_metadata_s))
/* Pointer to the newest heap block, or NULL before the first allocation.
 * Published lock-free via os_atomic_cmpxchg in _os_alloc_slow. */
OS_NOEXPORT void * volatile _os_alloc_heap;

OS_ATOMIC_EXPORT void* _os_alloc_once(struct _os_alloc_once_s *slot, size_t sz,
		os_function_t init);

void * volatile _os_alloc_heap;
53/*
54 * Simple allocator that doesn't have to worry about ever freeing allocations.
55 *
 * The _os_alloc_heap pointer always points to the newest
 * available heap block, or NULL if this is the first allocation. The heap has a
58 * small header at the top of each heap block, recording the currently
59 * allocated bytes and the pointer to the previous heap block.
60 *
61 * Ignoring the special case where the heapptr is NULL; in which case we always
62 * make a block. The allocator first atomically increments the allocated_bytes
63 * counter by sz and calculates the eventual base pointer. If base+sz is
64 * greater than allocation_size then we begin allocating a new page. Otherwise,
65 * base is returned.
66 *
67 * Page allocation vm_allocates a new page of allocation_size and then attempts
 * to atomically cmpxchg that pointer with the current heapptr. If successful,
69 * it links the previous page to the new heap block for debugging purposes and
70 * then reattempts allocation. If a thread loses the allocation race, it
71 * vm_deallocates the still-clean region and reattempts the whole allocation.
72 */
73
74static inline void*
75_os_alloc_alloc(void *heap, size_t sz)
76{
77 if (likely(heap)) {
78 _os_alloc_heap_metadata_s *metadata = (_os_alloc_heap_metadata_s*)heap;
79 size_t used = os_atomic_add(&metadata->allocated_bytes, sz, relaxed);
80 if (likely(used <= usable)) {
81 return ((char*)metadata + sizeof(_os_alloc_heap_metadata_s) +
82 used - sz);
83 }
84 }
85 /* This fall-through case is heap == NULL, or heap block is exhausted. */
86 return NULL;
87}
88
/*
 * Slow path: map a fresh heap block, race to publish it as the new head,
 * and retry the bump allocation until it succeeds. Never returns NULL
 * (crashes if the kernel refuses to map memory).
 */
OS_NOINLINE
static void*
_os_alloc_slow(void *heap, size_t sz)
{
	void *ptr;
	do {
		/*
		 * <rdar://problem/13208498> We allocate at PAGE_SIZE or above to ensure
		 * we don't land in the zero page *if* a binary has opted not to include
		 * the __PAGEZERO load command.
		 */
		mach_vm_address_t heapblk = PAGE_SIZE;
		kern_return_t kr;
		/* Map a zero-filled block anywhere at or above the hint address,
		 * tagged so the region is attributed to os_alloc_once in vmmap. */
		kr = mach_vm_map(mach_task_self(), &heapblk, allocation_size,
				0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_OS_ALLOC_ONCE),
				MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
				VM_INHERIT_DEFAULT);
		if (unlikely(kr)) {
			__LIBPLATFORM_INTERNAL_CRASH__(kr, "Failed to allocate in os_alloc_once");
		}
		/* Publish the new block only if the head is still the block we
		 * started from; winners link the old block in for debugging. */
		if (os_atomic_cmpxchg(&_os_alloc_heap, heap, (void*)heapblk, relaxed)) {
			((_os_alloc_heap_metadata_s*)heapblk)->prev = heap;
			heap = (void*)heapblk;
		} else {
			/* Lost the race: throw away our still-clean block and retry
			 * against the winner's freshly published block. */
			mach_vm_deallocate(mach_task_self(), heapblk, allocation_size);
			heap = _os_alloc_heap;
		}
		/* May still fail if other threads exhausted the new block first. */
		ptr = _os_alloc_alloc(heap, sz);
	} while (unlikely(!ptr));
	return ptr;
}
120
121static inline void*
122_os_alloc2(size_t sz)
123{
124 void *heap, *ptr;
125 if (unlikely(!sz || sz > usable)) {
126 __LIBPLATFORM_CLIENT_CRASH__(sz, "Requested allocation size is invalid");
127 }
128 heap = _os_alloc_heap;
129 if (likely(ptr = _os_alloc_alloc(heap, sz))) {
130 return ptr;
131 }
132 return _os_alloc_slow(heap, sz);
133}
134
135#pragma mark -
136#pragma mark os_alloc_once
137
/* Arguments bundled for the _os_once callback in _os_alloc_once. */
typedef struct _os_alloc_once_ctxt_s {
	struct _os_alloc_once_s *slot;
	size_t sz;
	os_function_t init;
} _os_alloc_once_ctxt_s;
143
144static void
145_os_alloc(void *ctxt)
146{
147 _os_alloc_once_ctxt_s *c = ctxt;
148 c->slot->ptr = _os_alloc2((c->sz + 0xf) & ~0xfu);
149 if (c->init) {
150 c->init(c->slot->ptr);
151 }
152}
153
154void*
155_os_alloc_once(struct _os_alloc_once_s *slot, size_t sz, os_function_t init)
156{
157 _os_alloc_once_ctxt_s c = {
158 .slot = slot,
159 .sz = sz,
160 .init = init,
161 };
162 _os_once(&slot->once, &c, _os_alloc);
163 return slot->ptr;
164}