/* Source: src/threadLocalVariables.c from Apple dyld-421.1 (apple/dyld.git) */
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2010 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26 #include <stdlib.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <stddef.h>
30 #include <stdio.h>
31 #include <pthread.h>
32 #include <Block.h>
33 #include <malloc/malloc.h>
34 #include <mach-o/loader.h>
35 #include <libkern/OSAtomic.h>
36
37 #include "dyld_priv.h"
38
39
// Pick 32- vs 64-bit Mach-O structure names once so the load-command
// parsing code below is word-size independent.
#if __LP64__
	typedef struct mach_header_64		macho_header;
	#define LC_SEGMENT_COMMAND			LC_SEGMENT_64
	typedef struct segment_command_64	macho_segment_command;
	typedef struct section_64			macho_section;
#else
	typedef struct mach_header			macho_header;
	#define LC_SEGMENT_COMMAND			LC_SEGMENT
	typedef struct segment_command		macho_segment_command;
	typedef struct section				macho_section;
#endif

// Fallback definitions of the thread-local section types and the header
// flag, for building against SDKs whose <mach-o/loader.h> predates TLV
// support.  Values match the loader.h definitions.
#ifndef S_THREAD_LOCAL_REGULAR
	#define S_THREAD_LOCAL_REGULAR					0x11
#endif

#ifndef S_THREAD_LOCAL_ZEROFILL
	#define S_THREAD_LOCAL_ZEROFILL					0x12
#endif

#ifndef S_THREAD_LOCAL_VARIABLES
	#define S_THREAD_LOCAL_VARIABLES				0x13
#endif

#ifndef S_THREAD_LOCAL_VARIABLE_POINTERS
	#define S_THREAD_LOCAL_VARIABLE_POINTERS		0x14
#endif

#ifndef S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
	#define S_THREAD_LOCAL_INIT_FUNCTION_POINTERS	0x15
#endif

#ifndef MH_HAS_TLV_DESCRIPTORS
	#define MH_HAS_TLV_DESCRIPTORS					0x800000
#endif


// signature of a thread_local terminator registered via _tlv_atexit()
typedef void (*TermFunc)(void*);
78
79
80
81 #if __has_feature(tls) || __arm64__ || __arm__
82
// One registered TLV state-change observer.  Nodes are only ever prepended
// (see dyld_register_tlv_state_change_handler), never removed, so readers
// may walk the list without taking a lock.
typedef struct TLVHandler {
	struct TLVHandler *next;
	dyld_tlv_state_change_handler handler;	// heap-copied block to invoke
	enum dyld_tlv_states state;				// which state transition it observes
} TLVHandler;

// lock-free prepend-only linked list
static TLVHandler * volatile tlv_handlers = NULL;


// One descriptor per thread-local variable, emitted by the linker.  Code
// accesses a TLV by calling thunk(descriptor); dyld rewrites 'thunk' and
// 'key' in tlv_initialize_descriptors() when the image loads.
struct TLVDescriptor
{
	void*			(*thunk)(struct TLVDescriptor*);
	unsigned long	key;		// pthread key anchoring the owning image's storage
	unsigned long	offset;		// variable's offset within that storage (set by linker)
};
typedef struct TLVDescriptor TLVDescriptor;
100
101
// implemented in assembly; returns the address of the current thread's copy
// of a TLV, allocating storage on first access via
// tlv_allocate_and_initialize_for_key()
extern void* tlv_get_addr(TLVDescriptor*);

// Maps a pthread key to the loaded image whose TLV storage that key anchors.
struct TLVImageInfo
{
	pthread_key_t				key;
	const struct mach_header*	mh;
};
typedef struct TLVImageInfo TLVImageInfo;

// growable table of live TLV-bearing images, guarded by tlv_live_image_lock
static TLVImageInfo*	tlv_live_images = NULL;
static unsigned int		tlv_live_image_alloc_count = 0;
static unsigned int		tlv_live_image_used_count = 0;
static pthread_mutex_t	tlv_live_image_lock = PTHREAD_MUTEX_INITIALIZER;
116
// Records the (key, image) pair in the global live-image table, growing the
// table by doubling (starting at 8 entries) when it is full.
// NOTE(review): malloc result is not checked; OOM here would crash in memcpy.
static void tlv_set_key_for_image(const struct mach_header* mh, pthread_key_t key)
{
	pthread_mutex_lock(&tlv_live_image_lock);
	if ( tlv_live_image_used_count == tlv_live_image_alloc_count ) {
		// table full (or never allocated): move contents into a larger buffer
		unsigned int grownCount = (tlv_live_images == NULL) ? 8 : tlv_live_image_alloc_count * 2;
		TLVImageInfo* grown = malloc(sizeof(TLVImageInfo) * grownCount);
		if ( tlv_live_images != NULL ) {
			memcpy(grown, tlv_live_images, sizeof(TLVImageInfo) * tlv_live_image_used_count);
			free(tlv_live_images);
		}
		tlv_live_images = grown;
		tlv_live_image_alloc_count = grownCount;
	}
	TLVImageInfo* slot = &tlv_live_images[tlv_live_image_used_count];
	slot->key = key;
	slot->mh = mh;
	++tlv_live_image_used_count;
	pthread_mutex_unlock(&tlv_live_image_lock);
}
135
// Returns the image whose TLV storage is anchored by 'key', or NULL if the
// key is not in the live-image table.  Linear search under the table lock.
static const struct mach_header* tlv_get_image_for_key(pthread_key_t key)
{
	const struct mach_header* found = NULL;
	pthread_mutex_lock(&tlv_live_image_lock);
	unsigned int i = 0;
	while ( (found == NULL) && (i < tlv_live_image_used_count) ) {
		if ( tlv_live_images[i].key == key )
			found = tlv_live_images[i].mh;
		++i;
	}
	pthread_mutex_unlock(&tlv_live_image_lock);
	return found;
}
149
150
// Invokes every registered handler whose state matches 'state', passing the
// buffer's address and size.
static void
tlv_notify(enum dyld_tlv_states state, void *buffer)
{
	if (tlv_handlers == NULL)
		return;

	// Always use malloc_size() so the allocated and deallocated states
	// report the same size; tlv_free() records nothing else about the buffer.
	dyld_tlv_info info = { sizeof(info), buffer, malloc_size(buffer) };

	TLVHandler *h = tlv_handlers;
	while (h != NULL) {
		if (h->handler && h->state == state)
			h->handler(h->state, &info);
		h = h->next;
	}
}
166
167
// Called lazily (from the tlv_get_addr assembly thunk) the first time a
// thread touches any TLV in the image associated with 'key'.  Allocates this
// thread's copy of the image's thread-local storage, copies in the template
// from the S_THREAD_LOCAL_REGULAR/ZEROFILL sections, records the buffer in
// the pthread key, fires the "allocated" notification, then runs any TLV
// initializer functions.  Returns the buffer (or NULL if the key is unknown).
__attribute__((visibility("hidden")))
void* tlv_allocate_and_initialize_for_key(pthread_key_t key)
{
	const struct mach_header* mh = tlv_get_image_for_key(key);
	if ( mh == NULL )
		return NULL;	// if data structures are screwed up, don't crash

	// first pass, find size and template
	uint8_t*		start = NULL;
	unsigned long	size = 0;
	intptr_t		slide = 0;
	bool			slideComputed = false;
	bool			hasInitializers = false;
	const uint32_t	cmd_count = mh->ncmds;
	const struct load_command* const cmds = (struct load_command*)(((uint8_t*)mh) + sizeof(macho_header));
	const struct load_command* cmd = cmds;
	for (uint32_t i = 0; i < cmd_count; ++i) {
		if ( cmd->cmd == LC_SEGMENT_COMMAND) {
			const macho_segment_command* seg = (macho_segment_command*)cmd;
			// slide = actual load address minus linked vmaddr, taken from the
			// first segment backed by file content
			if ( !slideComputed && (seg->filesize != 0) ) {
				slide = (uintptr_t)mh - seg->vmaddr;
				slideComputed = true;
			}
			const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
			const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
			for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
				switch ( sect->flags & SECTION_TYPE ) {
					case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
						hasInitializers = true;
						break;
					case S_THREAD_LOCAL_ZEROFILL:
					case S_THREAD_LOCAL_REGULAR:
						if ( start == NULL ) {
							// first of N contiguous TLV template sections, record as if this was only section
							start = (uint8_t*)(sect->addr + slide);
							size = sect->size;
						}
						else {
							// non-first of N contiguous TLV template sections, accumulate values
							// (assumes the linker laid these sections out contiguously)
							const uint8_t* newEnd = (uint8_t*)(sect->addr + slide + sect->size);
							size = newEnd - start;
						}
						break;
				}
			}
		}
		cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
	}

	// allocate buffer and fill with template
	// NOTE(review): malloc result is unchecked (OOM would crash in memcpy),
	// and if no template sections were found this is memcpy(buffer, NULL, 0).
	void* buffer = malloc(size);
	memcpy(buffer, start, size);

	// set this thread's value for key to be the new buffer.
	pthread_setspecific(key, buffer);

	// send tlv state notifications
	tlv_notify(dyld_tlv_state_allocated, buffer);

	// second pass, run initializers
	if ( hasInitializers ) {
		cmd = cmds;
		for (uint32_t i = 0; i < cmd_count; ++i) {
			if ( cmd->cmd == LC_SEGMENT_COMMAND) {
				const macho_segment_command* seg = (macho_segment_command*)cmd;
				const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
				const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
				for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
					if ( (sect->flags & SECTION_TYPE) == S_THREAD_LOCAL_INIT_FUNCTION_POINTERS ) {
						typedef void (*InitFunc)(void);
						InitFunc* funcs = (InitFunc*)(sect->addr + slide);
						const size_t count = sect->size / sizeof(uintptr_t);
						// run the function pointers last-to-first
						for (size_t j=count; j > 0; --j) {
							InitFunc func = funcs[j-1];
							func();
						}
					}
				}
			}
			cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
		}
	}
	return buffer;
}
253
254
// pthread destructor for TLV storage: installed by pthread_key_create() in
// tlv_initialize_descriptors(), run by pthreads when a thread exits.
// Notifies observers before the buffer is released.
static void
tlv_free(void *storage)
{
	tlv_notify(dyld_tlv_state_deallocated, storage);
	free(storage);
}
262
263
// Called when an image with MH_HAS_TLV_DESCRIPTORS is loaded.  Walks the
// image's S_THREAD_LOCAL_VARIABLES sections and rewrites each TLVDescriptor
// in place: 'thunk' is pointed at the tlv_get_addr assembly routine and
// 'key' at a freshly created pthread key ('offset' is left as the linker
// set it).  The key is created lazily, on the first non-empty section, and
// registered in the live-image table.
static void tlv_initialize_descriptors(const struct mach_header* mh)
{
	pthread_key_t	key = 0;
	intptr_t		slide = 0;
	bool			slideComputed = false;
	const uint32_t	cmd_count = mh->ncmds;
	const struct load_command* const cmds = (struct load_command*)(((uint8_t*)mh) + sizeof(macho_header));
	const struct load_command* cmd = cmds;
	for (uint32_t i = 0; i < cmd_count; ++i) {
		if ( cmd->cmd == LC_SEGMENT_COMMAND) {
			const macho_segment_command* seg = (macho_segment_command*)cmd;
			// slide from the first file-backed segment, as elsewhere in this file
			if ( !slideComputed && (seg->filesize != 0) ) {
				slide = (uintptr_t)mh - seg->vmaddr;
				slideComputed = true;
			}
			const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
			const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
			for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
				if ( (sect->flags & SECTION_TYPE) == S_THREAD_LOCAL_VARIABLES ) {
					if ( sect->size != 0 ) {
						// allocate pthread key when we first discover this image has TLVs
						if ( key == 0 ) {
							int result = pthread_key_create(&key, &tlv_free);
							if ( result != 0 )
								abort();	// out of pthread keys: cannot support TLVs
							tlv_set_key_for_image(mh, key);
						}
						// initialize each descriptor
						TLVDescriptor* start = (TLVDescriptor*)(sect->addr + slide);
						TLVDescriptor* end = (TLVDescriptor*)(sect->addr + sect->size + slide);
						for (TLVDescriptor* d=start; d < end; ++d) {
							d->thunk = tlv_get_addr;
							d->key = key;
							//d->offset = d->offset;  // offset unchanged
						}
					}
				}
			}
		}
		cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
	}
}
307
308
// dyld add-image callback.  This runs for every loaded image, so the common
// case must be cheap: the static linker sets MH_HAS_TLV_DESCRIPTORS on
// images that contain thread-local variables, letting us skip all other
// images without parsing their load commands.
void tlv_load_notification(const struct mach_header* mh, intptr_t slide)
{
	if ( (mh->flags & MH_HAS_TLV_DESCRIPTORS) != 0 )
		tlv_initialize_descriptors(mh);
}
316
317
// Registers a handler block to be invoked whenever any thread's TLV storage
// enters 'state' (allocated or deallocated).  Handlers are never removed.
// The prepend is lock-free: a CAS retry loop makes registration safe against
// concurrent registrations and against readers walking the list in tlv_notify().
void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state, dyld_tlv_state_change_handler handler)
{
	TLVHandler *h = malloc(sizeof(TLVHandler));		// NOTE(review): result unchecked
	h->state = state;
	h->handler = Block_copy(handler);	// heap-copy so the block outlives the caller's scope

	// publish the fully-initialized node at the head of the list
	TLVHandler *old;
	do {
		old = tlv_handlers;
		h->next = old;
	} while (! OSAtomicCompareAndSwapPtrBarrier(old, h, (void * volatile *)&tlv_handlers));
}
330
331
// Calls 'handler' once (with state dyld_tlv_state_allocated) for each TLV
// storage buffer the *current thread* has allocated.  The per-image buffers
// are snapshotted under the table lock; handlers run after the lock is
// dropped, so a handler may safely call back into this module.
void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler)
{
	pthread_mutex_lock(&tlv_live_image_lock);
	unsigned int count = tlv_live_image_used_count;
	if ( count == 0 ) {
		// FIX: with count == 0 the VLA below would have zero length, which is
		// undefined behavior (C11 6.7.6.2p5 requires size > 0)
		pthread_mutex_unlock(&tlv_live_image_lock);
		return;
	}
	void *list[count];
	for (unsigned int i = 0; i < count; ++i) {
		// NULL if this thread never touched that image's TLVs
		list[i] = pthread_getspecific(tlv_live_images[i].key);
	}
	pthread_mutex_unlock(&tlv_live_image_lock);

	for (unsigned int i = 0; i < count; ++i) {
		if (list[i]) {
			dyld_tlv_info info = { sizeof(info), list[i], malloc_size(list[i]) };
			handler(dyld_tlv_state_allocated, &info);
		}
	}
}
349
350
//
// thread_local terminators
//
// C++11 allows thread_local C++ objects which have constructors run
// on the thread before any use of the object and the object's destructor
// run on the thread when the thread terminates.
//
// To support this, libdyld gets a pthread key early in process start up and
// uses tlv_finalize as the key's destructor function.  This key must be
// allocated before any keys for TLV storage because when a thread
// terminates, the pthread package runs each key's destructor on its stored
// value in key allocation order.  Since we want C++ objects to be
// destructed before their storage is deallocated, the destructor key must
// come before the deallocation key.
//

// one registered terminator: function to call and the object to pass it
struct TLVTerminatorListEntry
{
	TermFunc	termFunc;
	void*		objAddr;
};

// Per-thread growable list of terminators, stored under tlv_terminators_key.
// 'entries[1]' is the pre-C99 variable-length-array idiom: the list is
// allocated with offsetof(struct TLVTerminatorList, entries[allocCount]).
struct TLVTerminatorList
{
	uint32_t							allocCount;	// capacity of entries[]
	uint32_t							useCount;	// entries in use
	struct TLVTerminatorListEntry		entries[1];	// variable length
};


// created in tlv_initializer(), before any per-image TLV keys
static pthread_key_t tlv_terminators_key = 0;
382
383 void _tlv_atexit(TermFunc func, void* objAddr)
384 {
385 // NOTE: this does not need locks because it only operates on current thread data
386 struct TLVTerminatorList* list = (struct TLVTerminatorList*)pthread_getspecific(tlv_terminators_key);
387 if ( list == NULL ) {
388 // handle first allocation
389 list = (struct TLVTerminatorList*)malloc(offsetof(struct TLVTerminatorList, entries[1]));
390 list->allocCount = 1;
391 list->useCount = 1;
392 list->entries[0].termFunc = func;
393 list->entries[0].objAddr = objAddr;
394 pthread_setspecific(tlv_terminators_key, list);
395 }
396 else {
397 if ( list->allocCount == list->allocCount ) {
398 // handle resizing allocation
399 uint32_t newAllocCount = list->allocCount * 2;
400 size_t newAllocSize = offsetof(struct TLVTerminatorList, entries[newAllocCount]);
401 struct TLVTerminatorList* newlist = (struct TLVTerminatorList*)malloc(newAllocSize);
402 newlist->allocCount = newAllocCount;
403 newlist->useCount = list->useCount;
404 for(uint32_t i=0; i < list->useCount; ++i)
405 newlist->entries[i] = list->entries[i];
406 pthread_setspecific(tlv_terminators_key, newlist);
407 free(list);
408 list = newlist;
409 }
410 // handle appending new entry
411 list->entries[list->useCount].termFunc = func;
412 list->entries[list->useCount].objAddr = objAddr;
413 list->useCount += 1;
414 }
415 }
416
417 // called by pthreads when the current thread is going away and
418 // _tlv_atexit() has been called on the thread.
419 static void tlv_finalize(void* storage)
420 {
421 struct TLVTerminatorList* list = (struct TLVTerminatorList*)storage;
422 // destroy in reverse order of construction
423 for(uint32_t i=list->useCount; i > 0 ; --i) {
424 struct TLVTerminatorListEntry* entry = &list->entries[i-1];
425 if ( entry->termFunc != NULL ) {
426 (*entry->termFunc)(entry->objAddr);
427 }
428 }
429 free(storage);
430 }
431
432 // <rdar://problem/13741816>
433 // called by exit() before it calls cxa_finalize() so that thread_local
434 // objects are destroyed before global objects.
435 void _tlv_exit()
436 {
437 void* termFuncs = pthread_getspecific(tlv_terminators_key);
438 if ( termFuncs != NULL )
439 tlv_finalize(termFuncs);
440 }
441
442
443 __attribute__((visibility("hidden")))
444 void tlv_initializer()
445 {
446 // create pthread key to handle thread_local destructors
447 // NOTE: this key must be allocated before any keys for TLV
448 // so that _pthread_tsd_cleanup will run destructors before deallocation
449 (void)pthread_key_create(&tlv_terminators_key, &tlv_finalize);
450
451 // register with dyld for notification when images are loaded
452 _dyld_register_func_for_add_image(tlv_load_notification);
453
454 }
455
456
// Linked images with TLVs contain references to this symbol (the initial
// 'thunk' in each descriptor), but dyld rewrites every descriptor in
// tlv_initialize_descriptors() before first use, so this should never run.
void _tlv_bootstrap()
{
	abort();
}
462
463
464
465 #else
466
467
468
// No-op stubs for targets without thread-local-variable support: the entry
// points exist so libdyld exports the same symbol set on every target, but
// they do nothing.

void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state, dyld_tlv_state_change_handler handler)
{
}

void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler)
{
}

void _tlv_exit()
{
}

void _tlv_atexit(TermFunc func, void* objAddr)
{
}

__attribute__((visibility("hidden")))
void tlv_initializer()
{
}
489
490
491
492 #endif // __has_feature(tls)
493
494