/* dyld-551.4 — src/threadLocalVariables.c */
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2010 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <stdlib.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <stddef.h>
30 #include <stdio.h>
31 #include <pthread.h>
32 #include <Block.h>
33 #include <malloc/malloc.h>
34 #include <mach-o/loader.h>
35 #include <libkern/OSAtomic.h>
36
37 #include "dyld_priv.h"
38
39
// Pointer-size-neutral names for the mach-o structures this file parses,
// so the load-command walking code below is written once for 32 and 64 bit.
#if __LP64__
typedef struct mach_header_64 macho_header;
#define LC_SEGMENT_COMMAND LC_SEGMENT_64
typedef struct segment_command_64 macho_segment_command;
typedef struct section_64 macho_section;
#else
typedef struct mach_header macho_header;
#define LC_SEGMENT_COMMAND LC_SEGMENT
typedef struct segment_command macho_segment_command;
typedef struct section macho_section;
#endif

// Fallback definitions of the thread-local section types and the
// "image has TLV descriptors" mach header flag, for building against
// SDK headers that predate them.  Values match <mach-o/loader.h>.
#ifndef S_THREAD_LOCAL_REGULAR
#define S_THREAD_LOCAL_REGULAR 0x11
#endif

#ifndef S_THREAD_LOCAL_ZEROFILL
#define S_THREAD_LOCAL_ZEROFILL 0x12
#endif

#ifndef S_THREAD_LOCAL_VARIABLES
#define S_THREAD_LOCAL_VARIABLES 0x13
#endif

#ifndef S_THREAD_LOCAL_VARIABLE_POINTERS
#define S_THREAD_LOCAL_VARIABLE_POINTERS 0x14
#endif

#ifndef S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
#define S_THREAD_LOCAL_INIT_FUNCTION_POINTERS 0x15
#endif

#ifndef MH_HAS_TLV_DESCRIPTORS
#define MH_HAS_TLV_DESCRIPTORS 0x800000
#endif


// signature of a thread_local destructor registered via _tlv_atexit()
typedef void (*TermFunc)(void*);
78
79
80
81 #if __has_feature(tls) || __arm64__ || __arm__
82
// One registered observer of TLV state changes (see
// dyld_register_tlv_state_change_handler below).
typedef struct TLVHandler {
    struct TLVHandler *next;
    dyld_tlv_state_change_handler handler;  // Block, copied at registration
    enum dyld_tlv_states state;             // the state this handler wants to observe
} TLVHandler;

// lock-free prepend-only linked list
static TLVHandler * volatile tlv_handlers = NULL;
91
92
// One thread-local variable descriptor from an image's
// S_THREAD_LOCAL_VARIABLES section.  tlv_initialize_descriptors() fills
// in thunk and key at image-load time; offset is left as set by the linker.
struct TLVDescriptor
{
    void*           (*thunk)(struct TLVDescriptor*);
    unsigned long   key;
    unsigned long   offset;
};
typedef struct TLVDescriptor TLVDescriptor;


// implemented in assembly
extern void* tlv_get_addr(TLVDescriptor*);
104
// Maps a pthread key to the loaded image whose TLV template it backs.
struct TLVImageInfo
{
    pthread_key_t             key;
    const struct mach_header* mh;
};
typedef struct TLVImageInfo TLVImageInfo;

// growable table of all live images that have TLVs; all three counters
// and the buffer are guarded by tlv_live_image_lock
static TLVImageInfo*    tlv_live_images = NULL;
static unsigned int     tlv_live_image_alloc_count = 0;
static unsigned int     tlv_live_image_used_count = 0;
static pthread_mutex_t  tlv_live_image_lock = PTHREAD_MUTEX_INITIALIZER;
116
117 static void tlv_set_key_for_image(const struct mach_header* mh, pthread_key_t key)
118 {
119 pthread_mutex_lock(&tlv_live_image_lock);
120 if ( tlv_live_image_used_count == tlv_live_image_alloc_count ) {
121 unsigned int newCount = (tlv_live_images == NULL) ? 8 : 2*tlv_live_image_alloc_count;
122 struct TLVImageInfo* newBuffer = malloc(sizeof(TLVImageInfo)*newCount);
123 if ( tlv_live_images != NULL ) {
124 memcpy(newBuffer, tlv_live_images, sizeof(TLVImageInfo)*tlv_live_image_used_count);
125 free(tlv_live_images);
126 }
127 tlv_live_images = newBuffer;
128 tlv_live_image_alloc_count = newCount;
129 }
130 tlv_live_images[tlv_live_image_used_count].key = key;
131 tlv_live_images[tlv_live_image_used_count].mh = mh;
132 ++tlv_live_image_used_count;
133 pthread_mutex_unlock(&tlv_live_image_lock);
134 }
135
136 static const struct mach_header* tlv_get_image_for_key(pthread_key_t key)
137 {
138 const struct mach_header* result = NULL;
139 pthread_mutex_lock(&tlv_live_image_lock);
140 for(unsigned int i=0; i < tlv_live_image_used_count; ++i) {
141 if ( tlv_live_images[i].key == key ) {
142 result = tlv_live_images[i].mh;
143 break;
144 }
145 }
146 pthread_mutex_unlock(&tlv_live_image_lock);
147 return result;
148 }
149
150
// Invoke every registered handler whose state matches "state", passing
// the TLV buffer and its size.  The handler list is prepend-only and
// never freed, so it can be walked without a lock.
static void
tlv_notify(enum dyld_tlv_states state, void *buffer)
{
    if (!tlv_handlers) return;

    // Always use malloc_size() to ensure allocated and deallocated states
    // send the same size. tlv_free() doesn't have anything else recorded.
    dyld_tlv_info info = { sizeof(info), buffer, malloc_size(buffer) };

    for (TLVHandler *h = tlv_handlers; h != NULL; h = h->next) {
        if (h->state == state && h->handler) {
            h->handler(h->state, &info);
        }
    }
}
166
167
168 // called lazily when TLV is first accessed
169 __attribute__((visibility("hidden")))
170 void* tlv_allocate_and_initialize_for_key(pthread_key_t key)
171 {
172 const struct mach_header* mh = tlv_get_image_for_key(key);
173 if ( mh == NULL )
174 return NULL; // if data structures are screwed up, don't crash
175
176 // first pass, find size and template
177 uint8_t* start = NULL;
178 unsigned long size = 0;
179 intptr_t slide = 0;
180 bool slideComputed = false;
181 bool hasInitializers = false;
182 const uint32_t cmd_count = mh->ncmds;
183 const struct load_command* const cmds = (struct load_command*)(((uint8_t*)mh) + sizeof(macho_header));
184 const struct load_command* cmd = cmds;
185 for (uint32_t i = 0; i < cmd_count; ++i) {
186 if ( cmd->cmd == LC_SEGMENT_COMMAND) {
187 const macho_segment_command* seg = (macho_segment_command*)cmd;
188 if ( !slideComputed && (seg->filesize != 0) ) {
189 slide = (uintptr_t)mh - seg->vmaddr;
190 slideComputed = true;
191 }
192 const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
193 const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
194 for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
195 switch ( sect->flags & SECTION_TYPE ) {
196 case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
197 hasInitializers = true;
198 break;
199 case S_THREAD_LOCAL_ZEROFILL:
200 case S_THREAD_LOCAL_REGULAR:
201 if ( start == NULL ) {
202 // first of N contiguous TLV template sections, record as if this was only section
203 start = (uint8_t*)(sect->addr + slide);
204 size = sect->size;
205 }
206 else {
207 // non-first of N contiguous TLV template sections, accumlate values
208 const uint8_t* newEnd = (uint8_t*)(sect->addr + slide + sect->size);
209 size = newEnd - start;
210 }
211 break;
212 }
213 }
214 }
215 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
216 }
217 // no thread local storage in image: should never happen
218 if ( size == 0 )
219 return NULL;
220
221 // allocate buffer and fill with template
222 void* buffer = malloc(size);
223 memcpy(buffer, start, size);
224
225 // set this thread's value for key to be the new buffer.
226 pthread_setspecific(key, buffer);
227
228 // send tlv state notifications
229 tlv_notify(dyld_tlv_state_allocated, buffer);
230
231 // second pass, run initializers
232 if ( hasInitializers ) {
233 cmd = cmds;
234 for (uint32_t i = 0; i < cmd_count; ++i) {
235 if ( cmd->cmd == LC_SEGMENT_COMMAND) {
236 const macho_segment_command* seg = (macho_segment_command*)cmd;
237 const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
238 const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
239 for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
240 if ( (sect->flags & SECTION_TYPE) == S_THREAD_LOCAL_INIT_FUNCTION_POINTERS ) {
241 typedef void (*InitFunc)(void);
242 InitFunc* funcs = (InitFunc*)(sect->addr + slide);
243 const size_t count = sect->size / sizeof(uintptr_t);
244 for (size_t j=count; j > 0; --j) {
245 InitFunc func = funcs[j-1];
246 func();
247 }
248 }
249 }
250 }
251 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
252 }
253 }
254 return buffer;
255 }
256
257
// pthread destructor for TLV storage: notifies handlers of the
// deallocation, then frees the thread's TLV buffer.
static void
tlv_free(void *storage)
{
    tlv_notify(dyld_tlv_state_deallocated, storage);
    free(storage);
}
265
266
// called when image is loaded
//
// Walks the image's load commands looking for S_THREAD_LOCAL_VARIABLES
// sections.  On first discovery of a non-empty section, allocates a
// pthread key (with tlv_free as its destructor) and registers it for the
// image; then patches every TLVDescriptor in place so its thunk points at
// the assembly tlv_get_addr and its key field holds the new key.
static void tlv_initialize_descriptors(const struct mach_header* mh)
{
    pthread_key_t key = 0;
    intptr_t slide = 0;
    bool slideComputed = false;
    const uint32_t cmd_count = mh->ncmds;
    const struct load_command* const cmds = (struct load_command*)(((uint8_t*)mh) + sizeof(macho_header));
    const struct load_command* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd == LC_SEGMENT_COMMAND) {
            const macho_segment_command* seg = (macho_segment_command*)cmd;
            // slide = actual load address minus the recorded vmaddr of the
            // first segment with file content
            if ( !slideComputed && (seg->filesize != 0) ) {
                slide = (uintptr_t)mh - seg->vmaddr;
                slideComputed = true;
            }
            const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
            const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
            for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
                if ( (sect->flags & SECTION_TYPE) == S_THREAD_LOCAL_VARIABLES ) {
                    if ( sect->size != 0 ) {
                        // allocate pthread key when we first discover this image has TLVs
                        if ( key == 0 ) {
                            int result = pthread_key_create(&key, &tlv_free);
                            if ( result != 0 )
                                abort();
                            tlv_set_key_for_image(mh, key);
                        }
                        // initialize each descriptor
                        TLVDescriptor* start = (TLVDescriptor*)(sect->addr + slide);
                        TLVDescriptor* end = (TLVDescriptor*)(sect->addr + sect->size + slide);
                        for (TLVDescriptor* d=start; d < end; ++d) {
                            d->thunk = tlv_get_addr;
                            d->key = key;
                            // d->offset is left as set by the linker
                        }
                    }
                }
            }
        }
        cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
    }
}
310
311
// dyld add-image callback: wires up TLV descriptors for images that have them.
static void tlv_load_notification(const struct mach_header* mh, intptr_t slide)
{
    // This is called on all images, even those without TLVs. So we want this to be fast.
    // The linker sets MH_HAS_TLV_DESCRIPTORS so we don't have to search images just to
    // find the ones that don't have TLVs.
    if ( mh->flags & MH_HAS_TLV_DESCRIPTORS )
        tlv_initialize_descriptors(mh);
}
319
320
321 void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state, dyld_tlv_state_change_handler handler)
322 {
323 TLVHandler *h = malloc(sizeof(TLVHandler));
324 h->state = state;
325 h->handler = Block_copy(handler);
326
327 TLVHandler *old;
328 do {
329 old = tlv_handlers;
330 h->next = old;
331 } while (! OSAtomicCompareAndSwapPtrBarrier(old, h, (void * volatile *)&tlv_handlers));
332 }
333
334
335 void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler)
336 {
337 pthread_mutex_lock(&tlv_live_image_lock);
338 unsigned int count = tlv_live_image_used_count;
339 void *list[count];
340 for (unsigned int i = 0; i < count; ++i) {
341 list[i] = pthread_getspecific(tlv_live_images[i].key);
342 }
343 pthread_mutex_unlock(&tlv_live_image_lock);
344
345 for (unsigned int i = 0; i < count; ++i) {
346 if (list[i]) {
347 dyld_tlv_info info = { sizeof(info), list[i], malloc_size(list[i]) };
348 handler(dyld_tlv_state_allocated, &info);
349 }
350 }
351 }
352
353
//
// thread_local terminators
//
// C++11 allows thread_local C++ objects which have constructors run
// on the thread before any use of the object and the object's destructor
// is run on the thread when the thread terminates.
//
// To support this libdyld gets a pthread key early in process start up and
// uses tlv_finalize as the key's destructor function. This key must be
// allocated before any thread local variables are instantiated because when
// a thread is terminated, the pthread package runs the destructor function
// on each key's storage values in key allocation order. Since we want
// C++ objects to be destructed before they are deallocated, we need the
// destructor key to come before the deallocation key.
//
369
// One thread_local destructor registration: function plus object address.
struct TLVTerminatorListEntry
{
    TermFunc    termFunc;
    void*       objAddr;
};

// Per-thread, growable list of destructors registered via _tlv_atexit().
struct TLVTerminatorList
{
    uint32_t    allocCount;   // capacity of entries[]
    uint32_t    useCount;     // entries currently in use
    struct TLVTerminatorListEntry   entries[1];  // variable length
};


// pthread key whose per-thread value is that thread's TLVTerminatorList
static pthread_key_t tlv_terminators_key = 0;
385
386 void _tlv_atexit(TermFunc func, void* objAddr)
387 {
388 // NOTE: this does not need locks because it only operates on current thread data
389 struct TLVTerminatorList* list = (struct TLVTerminatorList*)pthread_getspecific(tlv_terminators_key);
390 if ( list == NULL ) {
391 // handle first allocation
392 list = (struct TLVTerminatorList*)malloc(offsetof(struct TLVTerminatorList, entries[1]));
393 list->allocCount = 1;
394 list->useCount = 1;
395 list->entries[0].termFunc = func;
396 list->entries[0].objAddr = objAddr;
397 pthread_setspecific(tlv_terminators_key, list);
398 }
399 else {
400 if ( list->useCount == list->allocCount ) {
401 // handle resizing allocation
402 uint32_t newAllocCount = list->allocCount * 2;
403 size_t newAllocSize = offsetof(struct TLVTerminatorList, entries[newAllocCount]);
404 struct TLVTerminatorList* newlist = (struct TLVTerminatorList*)malloc(newAllocSize);
405 newlist->allocCount = newAllocCount;
406 newlist->useCount = list->useCount;
407 for(uint32_t i=0; i < list->useCount; ++i)
408 newlist->entries[i] = list->entries[i];
409 pthread_setspecific(tlv_terminators_key, newlist);
410 free(list);
411 list = newlist;
412 }
413 // handle appending new entry
414 list->entries[list->useCount].termFunc = func;
415 list->entries[list->useCount].objAddr = objAddr;
416 list->useCount += 1;
417 }
418 }
419
// called by pthreads when the current thread is going away and
// _tlv_atexit() has been called on the thread.  Also called directly by
// _tlv_exit() at process exit.
static void tlv_finalize(void* storage)
{
    struct TLVTerminatorList* list = (struct TLVTerminatorList*)storage;
    // destroy in reverse order of construction
    for(uint32_t i=list->useCount; i > 0 ; --i) {
        struct TLVTerminatorListEntry* entry = &list->entries[i-1];
        if ( entry->termFunc != NULL ) {
            (*entry->termFunc)(entry->objAddr);
        }
    }
    // NOTE(review): a terminator that itself calls _tlv_atexit() could
    // reallocate or replace "list" behind this loop — confirm callers
    // cannot do that, or snapshot the list before iterating.
    free(storage);
}
434
435 // <rdar://problem/13741816>
436 // called by exit() before it calls cxa_finalize() so that thread_local
437 // objects are destroyed before global objects.
438 void _tlv_exit()
439 {
440 void* termFuncs = pthread_getspecific(tlv_terminators_key);
441 if ( termFuncs != NULL )
442 tlv_finalize(termFuncs);
443 }
444
445
// One-time libdyld setup: claims the terminator pthread key and hooks
// image-load notifications so TLV descriptors get initialized.
__attribute__((visibility("hidden")))
void tlv_initializer()
{
    // create pthread key to handle thread_local destructors
    // NOTE: this key must be allocated before any keys for TLV
    // so that _pthread_tsd_cleanup will run destructors before deallocation
    (void)pthread_key_create(&tlv_terminators_key, &tlv_finalize);

    // register with dyld for notification when images are loaded
    _dyld_register_func_for_add_image(tlv_load_notification);

}
458
459
// linked images with TLV have references to this symbol, but it is never
// used at runtime (descriptors are re-pointed at tlv_get_addr at load);
// reaching it means descriptor initialization never happened.
void _tlv_bootstrap()
{
    abort();
}
465
466
467
468 #else
469
470
471
// No-op stubs for targets without compiler TLS support: the API surface
// is preserved so callers link, but there is no TLV machinery to drive.

void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state, dyld_tlv_state_change_handler handler)
{
}

void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler)
{
}

void _tlv_exit()
{
}

void _tlv_atexit(TermFunc func, void* objAddr)
{
}

__attribute__((visibility("hidden")))
void tlv_initializer()
{
}
492
493
494
495 #endif // __has_feature(tls)
496
497