/*
 * Copyright (c) 2007 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/**
 * @file objc-zalloc.mm
 *
 * "zone allocator" for objc.
 *
 * Provides packed allocation for data structures the runtime
 * almost never frees.
 */
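
/*
 * Usage sketch (illustrative only, not code from this file): callers
 * pick a Zone specialization based on whether sizeof(T) already fills
 * whole malloc buckets, the same test the ZoneInstantiate macro applies
 * at the bottom of this file. The RWZone alias here is hypothetical:
 *
 *   using RWZone = objc::Zone<class_rw_t,
 *                             sizeof(class_rw_t) % MALLOC_ALIGNMENT == 0>;
 *   class_rw_t *rw = RWZone::alloc();  // returned memory is fully zeroed
 *   RWZone::free(rw);                  // recycled onto the zone freelist
 *                                      // (packed specialization), not malloc
 */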

#include "objc-private.h"
#include "objc-zalloc.h"

namespace objc {

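/*
 * Lock-free pop from a Treiber-style intrusive stack. The {head, gen}
 * pair is compare-and-swapped as a single unit: gen increments on every
 * push and pop, so a concurrent pop-then-push of the same node (the
 * classic ABA hazard) cannot let a stale head/next pair win the CAS.
 * The initial non-atomic read of the pair is benign: a torn read merely
 * makes the first CAS attempt fail and reload the pair atomically.
 */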
void *AtomicQueue::pop()
{
    AtomicQueue l1, l2;

    l1.pair = pair; // non atomic on purpose

    do {
        if (l1.head == nullptr) {
            return nullptr;
        }
        l2.head = l1.head->next;
        l2.gen = l1.gen + 1;
    } while (!atomic_pair.compare_exchange_weak(l1.pair, l2.pair, relaxed, relaxed));

    return reinterpret_cast<void *>(l1.head);
}

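/*
 * Pushes a pre-linked chain of entries (head .. tail) with a single CAS;
 * pushing one element is presumably just the head == tail case (see the
 * push() declaration in objc-zalloc.h). The release ordering on success
 * publishes the tail->next store, and the nodes' contents, before the
 * new head becomes visible to a popping thread.
 */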
void AtomicQueue::push_list(void *_head, void *_tail)
{
    Entry *head = reinterpret_cast<Entry *>(_head);
    Entry *tail = reinterpret_cast<Entry *>(_tail);
    AtomicQueue l1, l2;

    l1.pair = pair; // non atomic load on purpose
    do {
        tail->next = l1.head;
        l2.head = head;
        l2.gen = l1.gen + 1;
    } while (!atomic_pair.compare_exchange_weak(l1.pair, l2.pair, release, relaxed));
}

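/*
 * Compile-time recursive gcd, used by alloc_slow() below to size slabs:
 * n_elem = MALLOC_ALIGNMENT / gcd(sizeof(T), MALLOC_ALIGNMENT) is the
 * smallest element count whose total size, lcm(sizeof(T),
 * MALLOC_ALIGNMENT), is a whole number of malloc alignment units.
 * Worked example, assuming MALLOC_ALIGNMENT == 16 and sizeof(T) == 24:
 * gcd(24, 16) == 8, so n_elem == 2, and each 48-byte slab spans exactly
 * three 16-byte units with no padding wasted between elements.
 */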
template<class T>
constexpr inline
T gcd(T a, T b)
{
    return b == 0 ? a : gcd(b, a % b);
}

template<class T>
AtomicQueue Zone<T, false>::_freelist;

template<class T>
T *Zone<T, false>::alloc_slow()
{
    // Our malloc aligns to 16 bytes, and this code should be used for sizes
    // small enough that this should always be an actual malloc bucket.
    //
    // The point of this code is *NOT* speed but optimal density.
    constexpr size_t n_elem = MALLOC_ALIGNMENT / gcd(sizeof(T), size_t{MALLOC_ALIGNMENT});
    Element *slab = reinterpret_cast<Element *>(::calloc(n_elem, sizeof(T)));
    // Chain elements 1 .. n_elem-2 to their successors; push_list() fills
    // in the last element's link when it splices the chain onto the old
    // freelist. n_elem >= 2 here: n_elem == 1 would mean
    // sizeof(T) % MALLOC_ALIGNMENT == 0, which selects the other Zone
    // specialization and never reaches this code.
    for (size_t i = 1; i < n_elem - 1; i++) {
        slab[i].next = &slab[i + 1];
    }
    // Donate elements 1 .. n_elem-1 to the shared freelist and hand
    // element 0 to the caller.
    _freelist.push_list(reinterpret_cast<void *>(&slab[1]),
                        reinterpret_cast<void *>(&slab[n_elem - 1]));
    return reinterpret_cast<T *>(&slab[0]);
}

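/*
 * Fast path: pop a recycled element off the shared freelist. Only the
 * first word, where the freelist link lives, needs clearing: free()
 * below zeroes the rest of the payload before recycling, and fresh
 * elements come straight from calloc, so alloc() always returns fully
 * zeroed memory.
 */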
template<class T>
T *Zone<T, false>::alloc()
{
    void *e = _freelist.pop();
    if (e) {
        __builtin_bzero(e, sizeof(void *));
        return reinterpret_cast<T *>(e);
    }
    return alloc_slow();
}

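/*
 * Zone memory is never handed back to malloc: free() scrubs the payload
 * and threads the element back onto the freelist, where the first word
 * is reused as the link. Zeroing eagerly here is what lets alloc()'s
 * fast path get away with clearing a single word.
 */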
template<class T>
void Zone<T, false>::free(T *ptr)
{
    if (ptr) {
        Element *e = reinterpret_cast<Element *>(ptr);
        __builtin_bzero(e->buf, sizeof(e->buf));
        _freelist.push(e);
    }
}

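/*
 * Explicit instantiations for the runtime's two current clients. The
 * bool parameter picks the specialization: a type whose size is already
 * a multiple of MALLOC_ALIGNMENT gains no density from packing, so
 * Zone<T, true> (defined in objc-zalloc.h) can presumably defer to the
 * system allocator directly.
 */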
#if __OBJC2__
#define ZoneInstantiate(type) \
    template class Zone<type, sizeof(type) % MALLOC_ALIGNMENT == 0>

ZoneInstantiate(class_rw_t);
ZoneInstantiate(class_rw_ext_t);
#endif

} // namespace objc