/*
 * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include "os/internal.h"
#include "os/alloc_once_impl.h"
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>

typedef struct _os_alloc_heap_metadata_s {
	size_t allocated_bytes;
	void *prev;
} _os_alloc_heap_metadata_s;

#define allocation_size (2 * vm_page_size)
#define usable (allocation_size - sizeof(_os_alloc_heap_metadata_s))
static void * volatile _os_alloc_heap;

/*
 * Simple allocator that doesn't have to worry about ever freeing allocations.
 *
 * The heapptr entry of _os_alloc_once_metadata always points to the newest
 * available heap page, or NULL if this is the first allocation. The heap has a
 * small header at the top of each heap block, recording the currently
 * allocated bytes and the pointer to the previous heap block.
 *
 * Ignoring the special case where heapptr is NULL (in which case we always
 * make a new block), the allocator first atomically increments the
 * allocated_bytes counter by sz and calculates the eventual base pointer. If
 * the new total exceeds usable (allocation_size minus the header), we begin
 * allocating a new page. Otherwise, the incremented counter marks the end of
 * this allocation and the base pointer is returned.
 *
 * Page allocation vm_allocates a new block of allocation_size and then
 * attempts to atomically cmpxchg that pointer with the current heapptr. If
 * successful, it links the previous page to the new heap block for debugging
 * purposes and then reattempts the allocation. If a thread loses the
 * allocation race, it vm_deallocates the still-clean region and reattempts the
 * whole allocation.
 */
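/*
 * Worked example of the fast path described above (an illustrative sketch;
 * the 4 KiB page size is an assumption, vm_page_size varies by platform):
 * with vm_page_size == 4096, allocation_size is 8192 bytes and usable is
 * 8192 - sizeof(_os_alloc_heap_metadata_s). A caller requesting sz == 48 that
 * bumps allocated_bytes from 96 to 144 receives
 * base = (char *)heap + sizeof(_os_alloc_heap_metadata_s) + 144 - 48,
 * i.e. the 48 bytes immediately following the previous allocation. A caller
 * whose bump pushes allocated_bytes past usable gets NULL back and takes the
 * _os_alloc_slow() path instead.
 */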
static inline void*
_os_alloc_alloc(void *heap, size_t sz)
{
	_os_alloc_heap_metadata_s *metadata = (_os_alloc_heap_metadata_s *)heap;
	if (likely(metadata)) {
		size_t used = os_atomic_add(&metadata->allocated_bytes, sz, relaxed);
		if (likely(used <= usable)) {
			return ((char *)metadata + sizeof(_os_alloc_heap_metadata_s) +
					used - sz);
		}
	}
	/* This fall-through case is heap == NULL, or the heap block is exhausted. */
	return NULL;
}

static void*
_os_alloc_slow(void *heap, size_t sz)
{
	void *ptr;
	kern_return_t kr;
	do {
		/*
		 * <rdar://problem/13208498> We allocate at PAGE_SIZE or above to
		 * ensure we don't land in the zero page *if* a binary has opted not
		 * to include the __PAGEZERO load command.
		 */
		mach_vm_address_t heapblk = PAGE_SIZE;

		kr = mach_vm_map(mach_task_self(), &heapblk, allocation_size,
				0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_OS_ALLOC_ONCE),
				MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
				VM_INHERIT_DEFAULT);
		if (unlikely(kr)) {
			__LIBPLATFORM_INTERNAL_CRASH__(kr, "Failed to allocate in os_alloc_once");
		}

		if (os_atomic_cmpxchg(&_os_alloc_heap, heap, (void *)heapblk, relaxed)) {
			/* Won the race: link the previous block and retry from the new one. */
			((_os_alloc_heap_metadata_s *)heapblk)->prev = heap;
			heap = (void *)heapblk;
		} else {
			/* Lost the race: release the still-clean block, use the winner's heap. */
			mach_vm_deallocate(mach_task_self(), heapblk, allocation_size);
			heap = _os_alloc_heap;
		}

		ptr = _os_alloc_alloc(heap, sz);
	} while (unlikely(!ptr));
	return ptr;
}

void*
_os_alloc2(size_t sz)
{
	void *heap, *ptr;

	if (unlikely(!sz || sz > usable)) {
		__LIBPLATFORM_CLIENT_CRASH__(sz, "Requested allocation size is invalid");
	}

	heap = _os_alloc_heap;
	if (likely(ptr = _os_alloc_alloc(heap, sz))) {
		return ptr;
	}

	return _os_alloc_slow(heap, sz);
}

#pragma mark os_alloc_once

typedef struct _os_alloc_once_ctxt_s {
	struct _os_alloc_once_s *slot;
	size_t sz;
	os_function_t init;
} _os_alloc_once_ctxt_s;

static void
_os_alloc(void *ctxt)
{
	_os_alloc_once_ctxt_s *c = ctxt;

	/* Round the request up to a 16-byte multiple before carving it out. */
	c->slot->ptr = _os_alloc2((c->sz + 0xf) & ~0xfu);

	if (c->init) {
		c->init(c->slot->ptr);
	}
}

void*
_os_alloc_once(struct _os_alloc_once_s *slot, size_t sz, os_function_t init)
{
	_os_alloc_once_ctxt_s c = {
		.slot = slot,
		.sz = sz,
		.init = init,
	};

	_os_once(&slot->once, &c, _os_alloc);

	return slot->ptr;
}