]> git.saurik.com Git - apple/dyld.git/blob - src/threadLocalVariables.c
dyld-239.3.tar.gz
[apple/dyld.git] / src / threadLocalVariables.c
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2010 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <stdlib.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <stddef.h>
30 #include <stdio.h>
31 #include <pthread.h>
32 #include <Block.h>
33 #include <malloc/malloc.h>
34 #include <mach-o/loader.h>
35 #include <libkern/OSAtomic.h>
36
37 #include "dyld_priv.h"
38
39
40 #if __LP64__
41 typedef struct mach_header_64 macho_header;
42 #define LC_SEGMENT_COMMAND LC_SEGMENT_64
43 typedef struct segment_command_64 macho_segment_command;
44 typedef struct section_64 macho_section;
45 #else
46 typedef struct mach_header macho_header;
47 #define LC_SEGMENT_COMMAND LC_SEGMENT
48 typedef struct segment_command macho_segment_command;
49 typedef struct section macho_section;
50 #endif
51
52 #ifndef S_THREAD_LOCAL_REGULAR
53 #define S_THREAD_LOCAL_REGULAR 0x11
54 #endif
55
56 #ifndef S_THREAD_LOCAL_ZEROFILL
57 #define S_THREAD_LOCAL_ZEROFILL 0x12
58 #endif
59
60 #ifndef S_THREAD_LOCAL_VARIABLES
61 #define S_THREAD_LOCAL_VARIABLES 0x13
62 #endif
63
64 #ifndef S_THREAD_LOCAL_VARIABLE_POINTERS
65 #define S_THREAD_LOCAL_VARIABLE_POINTERS 0x14
66 #endif
67
68 #ifndef S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
69 #define S_THREAD_LOCAL_INIT_FUNCTION_POINTERS 0x15
70 #endif
71
72 #ifndef MH_HAS_TLV_DESCRIPTORS
73 #define MH_HAS_TLV_DESCRIPTORS 0x800000
74 #endif
75
76
77 typedef void (*TermFunc)(void*);
78
79
80
81 #if __has_feature(tls)
82
// One registered TLV state-change observer.  Nodes are prepended to the
// lock-free tlv_handlers list below and are never removed or freed.
typedef struct TLVHandler {
	struct TLVHandler *next;                 // next observer in the list (NULL terminates)
	dyld_tlv_state_change_handler handler;   // heap copy of caller's block (see Block_copy in registration)
	enum dyld_tlv_states state;              // the one state this handler wants notifications for
} TLVHandler;
88
89 // lock-free prepend-only linked list
90 static TLVHandler * volatile tlv_handlers = NULL;
91
92
// In-memory layout of one thread-local variable descriptor as emitted by
// the linker into a __thread_vars section.  At image load time
// tlv_initialize_descriptors() points 'thunk' at tlv_get_addr and sets
// 'key' to the image's pthread key; 'offset' (the variable's offset within
// the image's TLV template) is left as linked.
// NOTE(review): field order/size is ABI shared with the assembly
// tlv_get_addr — do not reorder or resize.
struct TLVDescriptor
{
	void*			(*thunk)(struct TLVDescriptor*);
	unsigned long	key;
	unsigned long	offset;
};
typedef struct TLVDescriptor TLVDescriptor;
100
101
102 // implemented in assembly
103 extern void* tlv_get_addr(TLVDescriptor*);
104
// One entry in the tlv_live_images table: maps the pthread key allocated
// for an image's TLVs back to that image's mach_header.
struct TLVImageInfo
{
	pthread_key_t				key;   // key whose per-thread value is the TLV buffer
	const struct mach_header*	mh;    // image that owns this key
};
typedef struct TLVImageInfo TLVImageInfo;
111
112 static TLVImageInfo* tlv_live_images = NULL;
113 static unsigned int tlv_live_image_alloc_count = 0;
114 static unsigned int tlv_live_image_used_count = 0;
115 static pthread_mutex_t tlv_live_image_lock = PTHREAD_MUTEX_INITIALIZER;
116
117 static void tlv_set_key_for_image(const struct mach_header* mh, pthread_key_t key)
118 {
119 pthread_mutex_lock(&tlv_live_image_lock);
120 if ( tlv_live_image_used_count == tlv_live_image_alloc_count ) {
121 unsigned int newCount = (tlv_live_images == NULL) ? 8 : 2*tlv_live_image_alloc_count;
122 struct TLVImageInfo* newBuffer = malloc(sizeof(TLVImageInfo)*newCount);
123 if ( tlv_live_images != NULL ) {
124 memcpy(newBuffer, tlv_live_images, sizeof(TLVImageInfo)*tlv_live_image_used_count);
125 free(tlv_live_images);
126 }
127 tlv_live_images = newBuffer;
128 tlv_live_image_alloc_count = newCount;
129 }
130 tlv_live_images[tlv_live_image_used_count].key = key;
131 tlv_live_images[tlv_live_image_used_count].mh = mh;
132 ++tlv_live_image_used_count;
133 pthread_mutex_unlock(&tlv_live_image_lock);
134 }
135
136 static const struct mach_header* tlv_get_image_for_key(pthread_key_t key)
137 {
138 const struct mach_header* result = NULL;
139 pthread_mutex_lock(&tlv_live_image_lock);
140 for(unsigned int i=0; i < tlv_live_image_used_count; ++i) {
141 if ( tlv_live_images[i].key == key ) {
142 result = tlv_live_images[i].mh;
143 break;
144 }
145 }
146 pthread_mutex_unlock(&tlv_live_image_lock);
147 return result;
148 }
149
150
151 static void
152 tlv_notify(enum dyld_tlv_states state, void *buffer)
153 {
154 if (!tlv_handlers) return;
155
156 // Always use malloc_size() to ensure allocated and deallocated states
157 // send the same size. tlv_free() doesn't have anything else recorded.
158 dyld_tlv_info info = { sizeof(info), buffer, malloc_size(buffer) };
159
160 for (TLVHandler *h = tlv_handlers; h != NULL; h = h->next) {
161 if (h->state == state && h->handler) {
162 h->handler(h->state, &info);
163 }
164 }
165 }
166
167
168 // called lazily when TLV is first accessed
169 __attribute__((visibility("hidden")))
170 void* tlv_allocate_and_initialize_for_key(pthread_key_t key)
171 {
172 const struct mach_header* mh = tlv_get_image_for_key(key);
173 if ( mh == NULL )
174 return NULL; // if data structures are screwed up, don't crash
175
176 // first pass, find size and template
177 uint8_t* start = NULL;
178 unsigned long size = 0;
179 intptr_t slide = 0;
180 bool slideComputed = false;
181 bool hasInitializers = false;
182 const uint32_t cmd_count = mh->ncmds;
183 const struct load_command* const cmds = (struct load_command*)(((uint8_t*)mh) + sizeof(macho_header));
184 const struct load_command* cmd = cmds;
185 for (uint32_t i = 0; i < cmd_count; ++i) {
186 if ( cmd->cmd == LC_SEGMENT_COMMAND) {
187 const macho_segment_command* seg = (macho_segment_command*)cmd;
188 if ( !slideComputed && (seg->filesize != 0) ) {
189 slide = (uintptr_t)mh - seg->vmaddr;
190 slideComputed = true;
191 }
192 const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
193 const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
194 for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
195 switch ( sect->flags & SECTION_TYPE ) {
196 case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
197 hasInitializers = true;
198 break;
199 case S_THREAD_LOCAL_ZEROFILL:
200 case S_THREAD_LOCAL_REGULAR:
201 if ( start == NULL ) {
202 // first of N contiguous TLV template sections, record as if this was only section
203 start = (uint8_t*)(sect->addr + slide);
204 size = sect->size;
205 }
206 else {
207 // non-first of N contiguous TLV template sections, accumlate values
208 const uint8_t* newEnd = (uint8_t*)(sect->addr + slide + sect->size);
209 size = newEnd - start;
210 }
211 break;
212 }
213 }
214 }
215 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
216 }
217
218 // allocate buffer and fill with template
219 void* buffer = malloc(size);
220 memcpy(buffer, start, size);
221
222 // set this thread's value for key to be the new buffer.
223 pthread_setspecific(key, buffer);
224
225 // send tlv state notifications
226 tlv_notify(dyld_tlv_state_allocated, buffer);
227
228 // second pass, run initializers
229 if ( hasInitializers ) {
230 cmd = cmds;
231 for (uint32_t i = 0; i < cmd_count; ++i) {
232 if ( cmd->cmd == LC_SEGMENT_COMMAND) {
233 const macho_segment_command* seg = (macho_segment_command*)cmd;
234 const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
235 const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
236 for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
237 if ( (sect->flags & SECTION_TYPE) == S_THREAD_LOCAL_INIT_FUNCTION_POINTERS ) {
238 typedef void (*InitFunc)(void);
239 InitFunc* funcs = (InitFunc*)(sect->addr + slide);
240 const uint32_t count = sect->size / sizeof(uintptr_t);
241 for (uint32_t i=count; i > 0; --i) {
242 InitFunc func = funcs[i-1];
243 func();
244 }
245 }
246 }
247 }
248 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
249 }
250 }
251 return buffer;
252 }
253
254
255 // pthread destructor for TLV storage
256 static void
257 tlv_free(void *storage)
258 {
259 tlv_notify(dyld_tlv_state_deallocated, storage);
260 free(storage);
261 }
262
263
264 // called when image is loaded
265 static void tlv_initialize_descriptors(const struct mach_header* mh)
266 {
267 pthread_key_t key = 0;
268 intptr_t slide = 0;
269 bool slideComputed = false;
270 const uint32_t cmd_count = mh->ncmds;
271 const struct load_command* const cmds = (struct load_command*)(((uint8_t*)mh) + sizeof(macho_header));
272 const struct load_command* cmd = cmds;
273 for (uint32_t i = 0; i < cmd_count; ++i) {
274 if ( cmd->cmd == LC_SEGMENT_COMMAND) {
275 const macho_segment_command* seg = (macho_segment_command*)cmd;
276 if ( !slideComputed && (seg->filesize != 0) ) {
277 slide = (uintptr_t)mh - seg->vmaddr;
278 slideComputed = true;
279 }
280 const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
281 const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
282 for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
283 if ( (sect->flags & SECTION_TYPE) == S_THREAD_LOCAL_VARIABLES ) {
284 if ( sect->size != 0 ) {
285 // allocate pthread key when we first discover this image has TLVs
286 if ( key == 0 ) {
287 int result = pthread_key_create(&key, &tlv_free);
288 if ( result != 0 )
289 abort();
290 tlv_set_key_for_image(mh, key);
291 }
292 // initialize each descriptor
293 TLVDescriptor* start = (TLVDescriptor*)(sect->addr + slide);
294 TLVDescriptor* end = (TLVDescriptor*)(sect->addr + sect->size + slide);
295 for (TLVDescriptor* d=start; d < end; ++d) {
296 d->thunk = tlv_get_addr;
297 d->key = key;
298 //d->offset = d->offset; // offset unchanged
299 }
300 }
301 }
302 }
303 }
304 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
305 }
306 }
307
308 // called by dyld when a image is loaded
309 static const char* tlv_load_notification(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info info[])
310 {
311 // this is called on all images, even those without TLVs, so we want
312 // this to be fast. The linker sets MH_HAS_TLV_DESCRIPTORS so we don't
313 // have to search images just to find the don't have TLVs.
314 for (uint32_t i=0; i < infoCount; ++i) {
315 if ( info[i].imageLoadAddress->flags & MH_HAS_TLV_DESCRIPTORS )
316 tlv_initialize_descriptors(info[i].imageLoadAddress);
317 }
318 return NULL;
319 }
320
321
// Public API: register a block to be invoked whenever a thread's TLV
// storage enters 'state' (allocated or deallocated).  Handlers are kept
// in a lock-free, prepend-only list and are never removed.
// NOTE: the malloc result is used unchecked; on OOM this would crash —
// consistent with the rest of this file's allocation handling.
void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state, dyld_tlv_state_change_handler handler)
{
	TLVHandler *h = malloc(sizeof(TLVHandler));
	h->state = state;
	h->handler = Block_copy(handler);	// caller's block may be stack-based; copy to heap

	// lock-free prepend: retry the CAS if another thread races us
	TLVHandler *old;
	do {
		old = tlv_handlers;
		h->next = old;
	} while (! OSAtomicCompareAndSwapPtrBarrier(old, h, (void * volatile *)&tlv_handlers));
}
334
335
336 void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler)
337 {
338 pthread_mutex_lock(&tlv_live_image_lock);
339 unsigned int count = tlv_live_image_used_count;
340 void *list[count];
341 for (unsigned int i = 0; i < count; ++i) {
342 list[i] = pthread_getspecific(tlv_live_images[i].key);
343 }
344 pthread_mutex_unlock(&tlv_live_image_lock);
345
346 for (unsigned int i = 0; i < count; ++i) {
347 if (list[i]) {
348 dyld_tlv_info info = { sizeof(info), list[i], malloc_size(list[i]) };
349 handler(dyld_tlv_state_allocated, &info);
350 }
351 }
352 }
353
354
355 //
356 // thread_local terminators
357 //
358 // C++ 0x allows thread_local C++ objects which have constructors run
359 // on the thread before any use of the object and the object's destructor
360 // is run on the thread when the thread terminates.
361 //
362 // To support this libdyld gets a pthread key early in process start up and
363 // uses tlv_finalize and the key's destructor function. This key must be
364 // allocated before any thread local variables are instantiated because when
365 // a thread is terminated, the pthread package runs the destructor function
366 // on each key's storage values in key allocation order. Since we want
367 // C++ objects to be destructred before they are deallocated, we need the
368 // destructor key to come before the deallocation key.
369 //
370
// One registered thread_local destructor: the function and the object
// address to pass it.
struct TLVTerminatorListEntry
{
	TermFunc termFunc;
	void* objAddr;
};

// Per-thread growable array of terminator entries, stored as the value of
// tlv_terminators_key.  'entries[1]' is the old-style variable-length-array
// trick; _tlv_atexit sizes allocations with offsetof(..., entries[N]),
// so do not change this to a flexible array member without auditing that.
struct TLVTerminatorList
{
	uint32_t allocCount;   // capacity of entries[]
	uint32_t useCount;     // number of entries in use
	struct TLVTerminatorListEntry entries[1]; // variable length
};
383
384
385 static pthread_key_t tlv_terminators_key = 0;
386
387 void _tlv_atexit(TermFunc func, void* objAddr)
388 {
389 // NOTE: this does not need locks because it only operates on current thread data
390 struct TLVTerminatorList* list = (struct TLVTerminatorList*)pthread_getspecific(tlv_terminators_key);
391 if ( list == NULL ) {
392 // handle first allocation
393 list = (struct TLVTerminatorList*)malloc(offsetof(struct TLVTerminatorList, entries[1]));
394 list->allocCount = 1;
395 list->useCount = 1;
396 list->entries[0].termFunc = func;
397 list->entries[0].objAddr = objAddr;
398 pthread_setspecific(tlv_terminators_key, list);
399 }
400 else {
401 if ( list->allocCount == list->allocCount ) {
402 // handle resizing allocation
403 uint32_t newAllocCount = list->allocCount * 2;
404 uint32_t newAllocSize = offsetof(struct TLVTerminatorList, entries[newAllocCount]);
405 struct TLVTerminatorList* newlist = (struct TLVTerminatorList*)malloc(newAllocSize);
406 newlist->allocCount = newAllocCount;
407 newlist->useCount = list->useCount;
408 for(uint32_t i=0; i < list->useCount; ++i)
409 newlist->entries[i] = list->entries[i];
410 pthread_setspecific(tlv_terminators_key, newlist);
411 free(list);
412 list = newlist;
413 }
414 // handle appending new entry
415 list->entries[list->useCount].termFunc = func;
416 list->entries[list->useCount].objAddr = objAddr;
417 list->useCount += 1;
418 }
419 }
420
421 // called by pthreads when the current thread is going away and
422 // _tlv_atexit() has been called on the thread.
423 static void tlv_finalize(void* storage)
424 {
425 struct TLVTerminatorList* list = (struct TLVTerminatorList*)storage;
426 // destroy in reverse order of construction
427 for(uint32_t i=list->useCount; i > 0 ; --i) {
428 struct TLVTerminatorListEntry* entry = &list->entries[i-1];
429 if ( entry->termFunc != NULL ) {
430 (*entry->termFunc)(entry->objAddr);
431 }
432 }
433 free(storage);
434 }
435
436 // <rdar://problem/13741816>
437 // called by exit() before it calls cxa_finalize() so that thread_local
438 // objects are destroyed before global objects.
439 void _tlv_exit()
440 {
441 void* termFuncs = pthread_getspecific(tlv_terminators_key);
442 if ( termFuncs != NULL )
443 tlv_finalize(termFuncs);
444 }
445
446
// One-time setup, called early in libdyld start up (see the comment block
// above): claims the terminator key and hooks image-load notifications.
__attribute__((visibility("hidden")))
void tlv_initializer()
{
	// create pthread key to handle thread_local destructors
	// NOTE: this key must be allocated before any keys for TLV
	// so that _pthread_tsd_cleanup will run destructors before deallocation
	(void)pthread_key_create(&tlv_terminators_key, &tlv_finalize);

	// register with dyld for notification when images are loaded
	dyld_register_image_state_change_handler(dyld_image_state_bound, true, tlv_load_notification);
}
458
459
// Linked images with TLVs have references to this symbol, but it is never
// used at runtime: tlv_initialize_descriptors() rewrites every descriptor's
// thunk to tlv_get_addr at load time.  Reaching this means descriptors were
// not fixed up, so die loudly.
void _tlv_bootstrap()
{
	abort();
}
465
466
467
468 #else
469
470
471
// Stub implementations used when the toolchain has no thread-local-variable
// support (__has_feature(tls) is false): all entry points become no-ops so
// clients can link against the same symbols unconditionally.

void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state, dyld_tlv_state_change_handler handler)
{
}

void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler)
{
}

void _tlv_exit()
{
}

void _tlv_atexit(TermFunc func, void* objAddr)
{
}

__attribute__((visibility("hidden")))
void tlv_initializer()
{
}
492
493
494
495 #endif // __has_feature(tls)
496
497