/*
 * src/threadLocalVariables.c — from the dyld-195.5 source distribution
 * (apple/dyld, mirrored at git.saurik.com).
 */
1/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2010 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25
26#include <stdlib.h>
27#include <stdint.h>
28#include <string.h>
29#include <stddef.h>
30#include <stdio.h>
31#include <pthread.h>
32#include <Block.h>
33#include <malloc/malloc.h>
34#include <mach-o/loader.h>
35#include <libkern/OSAtomic.h>
36
37#include "dyld_priv.h"
38
39
// Width-neutral Mach-O aliases: use the 64-bit load-command structures when
// building LP64, the 32-bit ones otherwise, so the section-walking code below
// can be written once.
#if __LP64__
	typedef struct mach_header_64		macho_header;
	#define LC_SEGMENT_COMMAND			LC_SEGMENT_64
	typedef struct segment_command_64	macho_segment_command;
	typedef struct section_64			macho_section;
#else
	typedef struct mach_header			macho_header;
	#define LC_SEGMENT_COMMAND			LC_SEGMENT
	typedef struct segment_command		macho_segment_command;
	typedef struct section				macho_section;
#endif

// The thread-local section types and the MH_HAS_TLV_DESCRIPTORS header flag
// may be missing from older SDK copies of <mach-o/loader.h>, so define them
// here when absent.  Values must match <mach-o/loader.h>.
#ifndef S_THREAD_LOCAL_REGULAR
#define S_THREAD_LOCAL_REGULAR					0x11
#endif

#ifndef S_THREAD_LOCAL_ZEROFILL
#define S_THREAD_LOCAL_ZEROFILL					0x12
#endif

#ifndef S_THREAD_LOCAL_VARIABLES
#define S_THREAD_LOCAL_VARIABLES				0x13
#endif

#ifndef S_THREAD_LOCAL_VARIABLE_POINTERS
#define S_THREAD_LOCAL_VARIABLE_POINTERS		0x14
#endif

#ifndef S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
#define S_THREAD_LOCAL_INIT_FUNCTION_POINTERS	0x15
#endif

#ifndef MH_HAS_TLV_DESCRIPTORS
	#define MH_HAS_TLV_DESCRIPTORS				0x800000
#endif
75
76#if __i386__ || __x86_64__
77
// One registered observer of TLV allocation/deallocation events.
typedef struct TLVHandler {
	struct TLVHandler *next;
	dyld_tlv_state_change_handler handler;	// Block, copied at registration time
	enum dyld_tlv_states state;				// the one state this handler wants
} TLVHandler;

// lock-free prepend-only linked list
static TLVHandler * volatile tlv_handlers = NULL;


// Layout of the descriptor the linker emits for each thread-local variable.
// tlv_initialize_descriptors() fills in 'thunk' and 'key'; 'offset' is the
// variable's offset within the image's TLV template and is left unchanged.
struct TLVDescriptor
{
	void*			(*thunk)(struct TLVDescriptor*);
	unsigned long	key;
	unsigned long	offset;
};
typedef struct TLVDescriptor  TLVDescriptor;


// implemented in assembly
extern void* tlv_get_addr(TLVDescriptor*);

// Maps a pthread key to the loaded image whose TLV storage it backs.
struct TLVImageInfo
{
	pthread_key_t				key;
	const struct mach_header*	mh;
};
typedef struct TLVImageInfo		TLVImageInfo;

// Growable table of key->image mappings, guarded by tlv_live_image_lock.
static TLVImageInfo*	tlv_live_images = NULL;
static unsigned int		tlv_live_image_alloc_count = 0;
static unsigned int		tlv_live_image_used_count = 0;
static pthread_mutex_t	tlv_live_image_lock = PTHREAD_MUTEX_INITIALIZER;
111
112static void tlv_set_key_for_image(const struct mach_header* mh, pthread_key_t key)
113{
114 pthread_mutex_lock(&tlv_live_image_lock);
115 if ( tlv_live_image_used_count == tlv_live_image_alloc_count ) {
116 unsigned int newCount = (tlv_live_images == NULL) ? 8 : 2*tlv_live_image_alloc_count;
117 struct TLVImageInfo* newBuffer = malloc(sizeof(TLVImageInfo)*newCount);
118 if ( tlv_live_images != NULL ) {
119 memcpy(newBuffer, tlv_live_images, sizeof(TLVImageInfo)*tlv_live_image_used_count);
120 free(tlv_live_images);
121 }
122 tlv_live_images = newBuffer;
123 tlv_live_image_alloc_count = newCount;
124 }
125 tlv_live_images[tlv_live_image_used_count].key = key;
126 tlv_live_images[tlv_live_image_used_count].mh = mh;
127 ++tlv_live_image_used_count;
128 pthread_mutex_unlock(&tlv_live_image_lock);
129}
130
131static const struct mach_header* tlv_get_image_for_key(pthread_key_t key)
132{
133 const struct mach_header* result = NULL;
134 pthread_mutex_lock(&tlv_live_image_lock);
135 for(unsigned int i=0; i < tlv_live_image_used_count; ++i) {
136 if ( tlv_live_images[i].key == key ) {
137 result = tlv_live_images[i].mh;
138 break;
139 }
140 }
141 pthread_mutex_unlock(&tlv_live_image_lock);
142 return result;
143}
144
145
146static void
147tlv_notify(enum dyld_tlv_states state, void *buffer)
148{
149 if (!tlv_handlers) return;
150
151 // Always use malloc_size() to ensure allocated and deallocated states
152 // send the same size. tlv_free() doesn't have anything else recorded.
153 dyld_tlv_info info = { sizeof(info), buffer, malloc_size(buffer) };
154
155 for (TLVHandler *h = tlv_handlers; h != NULL; h = h->next) {
156 if (h->state == state && h->handler) {
157 h->handler(h->state, &info);
158 }
159 }
160}
161
162
163// called lazily when TLV is first accessed
164__attribute__((visibility("hidden")))
165void* tlv_allocate_and_initialize_for_key(pthread_key_t key)
166{
167 const struct mach_header* mh = tlv_get_image_for_key(key);
168 // first pass, find size and template
169 uint8_t* start = NULL;
170 unsigned long size;
171 intptr_t slide = 0;
172 bool slideComputed = false;
173 bool hasInitializers = false;
174 const uint32_t cmd_count = mh->ncmds;
175 const struct load_command* const cmds = (struct load_command*)(((uint8_t*)mh) + sizeof(macho_header));
176 const struct load_command* cmd = cmds;
177 for (uint32_t i = 0; i < cmd_count; ++i) {
178 if ( cmd->cmd == LC_SEGMENT_COMMAND) {
179 const macho_segment_command* seg = (macho_segment_command*)cmd;
180 if ( !slideComputed && (seg->filesize != 0) ) {
181 slide = (uintptr_t)mh - seg->vmaddr;
182 slideComputed = true;
183 }
184 const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
185 const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
186 for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
187 switch ( sect->flags & SECTION_TYPE ) {
188 case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS:
189 hasInitializers = true;
190 break;
191 case S_THREAD_LOCAL_ZEROFILL:
192 case S_THREAD_LOCAL_REGULAR:
193 if ( start == NULL ) {
194 // first of N contiguous TLV template sections, record as if this was only section
195 start = (uint8_t*)(sect->addr + slide);
196 size = sect->size;
197 }
198 else {
199 // non-first of N contiguous TLV template sections, accumlate values
200 const uint8_t* newEnd = (uint8_t*)(sect->addr + slide + sect->size);
201 size = newEnd - start;
202 }
203 break;
204 }
205 }
206 }
207 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
208 }
209
210 // allocate buffer and fill with template
211 void* buffer = malloc(size);
212 memcpy(buffer, start, size);
213
214 // set this thread's value for key to be the new buffer.
215 pthread_setspecific(key, buffer);
216
217 // send tlv state notifications
218 tlv_notify(dyld_tlv_state_allocated, buffer);
219
220 // second pass, run initializers
221 if ( hasInitializers ) {
222 cmd = cmds;
223 for (uint32_t i = 0; i < cmd_count; ++i) {
224 if ( cmd->cmd == LC_SEGMENT_COMMAND) {
225 const macho_segment_command* seg = (macho_segment_command*)cmd;
226 const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
227 const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
228 for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
229 if ( (sect->flags & SECTION_TYPE) == S_THREAD_LOCAL_INIT_FUNCTION_POINTERS ) {
230 typedef void (*InitFunc)(void);
231 InitFunc* funcs = (InitFunc*)(sect->addr + slide);
232 const uint32_t count = sect->size / sizeof(uintptr_t);
233 for (uint32_t i=count; i > 0; --i) {
234 InitFunc func = funcs[i-1];
235 func();
236 }
237 }
238 }
239 }
240 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
241 }
242 }
243 return buffer;
244}
245
246
// pthread destructor for TLV storage
// Runs at thread exit for each key created by tlv_initialize_descriptors().
// The "deallocated" notification is sent first, while the buffer is still
// valid (tlv_notify reads its size via malloc_size), then the storage is freed.
static void
tlv_free(void *storage)
{
	tlv_notify(dyld_tlv_state_deallocated, storage);
	free(storage);
}
254
255
// called when image is loaded
// Scans the image's segments for non-empty S_THREAD_LOCAL_VARIABLES sections.
// On first discovery, allocates a pthread key (with tlv_free as destructor)
// and records the key->image mapping.  Every TLVDescriptor in such sections
// is then pointed at the assembly thunk tlv_get_addr and stamped with the
// key; the linker-written 'offset' field is left untouched.
static void tlv_initialize_descriptors(const struct mach_header* mh)
{
	pthread_key_t	key = 0;
	intptr_t		slide = 0;
	bool			slideComputed = false;
	const uint32_t cmd_count = mh->ncmds;
	const struct load_command* const cmds = (struct load_command*)(((uint8_t*)mh) + sizeof(macho_header));
	const struct load_command* cmd = cmds;
	for (uint32_t i = 0; i < cmd_count; ++i) {
		if ( cmd->cmd == LC_SEGMENT_COMMAND) {
			const macho_segment_command* seg = (macho_segment_command*)cmd;
			// slide = actual load address - linker-assigned address, taken
			// from the first segment that has file content
			if ( !slideComputed && (seg->filesize != 0) ) {
				slide = (uintptr_t)mh - seg->vmaddr;
				slideComputed = true;
			}
			const macho_section* const sectionsStart = (macho_section*)((char*)seg + sizeof(macho_segment_command));
			const macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
			for (const macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
				if ( (sect->flags & SECTION_TYPE) == S_THREAD_LOCAL_VARIABLES ) {
					if ( sect->size != 0 ) {
						// allocate pthread key when we first discover this image has TLVs
						if ( key == 0 ) {
							int result = pthread_key_create(&key, &tlv_free);
							if ( result != 0 )
								abort();
							tlv_set_key_for_image(mh, key);
						}
						// initialize each descriptor
						TLVDescriptor* start = (TLVDescriptor*)(sect->addr + slide);
						TLVDescriptor* end = (TLVDescriptor*)(sect->addr + sect->size + slide);
						for (TLVDescriptor* d=start; d < end; ++d) {
							d->thunk = tlv_get_addr;
							d->key = key;
							//d->offset = d->offset;  // offset unchanged
						}
					}
				}
			}
		}
		cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
	}
}
299
300// called by dyld when a image is loaded
301static const char* tlv_load_notification(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info info[])
302{
303 // this is called on all images, even those without TLVs, so we want
304 // this to be fast. The linker sets MH_HAS_TLV_DESCRIPTORS so we don't
305 // have to search images just to find the don't have TLVs.
306 for (uint32_t i=0; i < infoCount; ++i) {
307 if ( info[i].imageLoadAddress->flags & MH_HAS_TLV_DESCRIPTORS )
308 tlv_initialize_descriptors(info[i].imageLoadAddress);
309 }
310 return NULL;
311}
312
313
314void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state, dyld_tlv_state_change_handler handler)
315{
316 TLVHandler *h = malloc(sizeof(TLVHandler));
317 h->state = state;
318 h->handler = Block_copy(handler);
319
320 TLVHandler *old;
321 do {
322 old = tlv_handlers;
323 h->next = old;
324 } while (! OSAtomicCompareAndSwapPtrBarrier(old, h, (void * volatile *)&tlv_handlers));
325}
326
327
328void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler)
329{
330 pthread_mutex_lock(&tlv_live_image_lock);
331 unsigned int count = tlv_live_image_used_count;
332 void *list[count];
333 for (unsigned int i = 0; i < count; ++i) {
334 list[i] = pthread_getspecific(tlv_live_images[i].key);
335 }
336 pthread_mutex_unlock(&tlv_live_image_lock);
337
338 for (unsigned int i = 0; i < count; ++i) {
339 if (list[i]) {
340 dyld_tlv_info info = { sizeof(info), list[i], malloc_size(list[i]) };
341 handler(dyld_tlv_state_allocated, &info);
342 }
343 }
344}
345
346
347//
348// thread_local terminators
349//
350// C++ 0x allows thread_local C++ objects which have constructors run
351// on the thread before any use of the object and the object's destructor
352// is run on the thread when the thread terminates.
353//
354// To support this libdyld gets a pthread key early in process start up and
355// uses tlv_finalize and the key's destructor function. This key must be
356// allocated before any thread local variables are instantiated because when
357// a thread is terminated, the pthread package runs the destructor function
358// on each key's storage values in key allocation order. Since we want
 * C++ objects to be destructed before they are deallocated, we need the
360// destructor key to come before the deallocation key.
361//
362
// Signature of a thread_local destructor registered via _tlv_atexit().
typedef void (*TermFunc)(void*);

// One registered destructor: the function plus the object address to pass it.
struct TLVTerminatorListEntry
{
	TermFunc	termFunc;
	void*		objAddr;
};

// Per-thread growable array of registered destructors, stored as the value
// of tlv_terminators_key.  'entries' is a variable-length trailing array
// (pre-C99 [1] idiom); the struct is malloc'ed with room for allocCount
// entries via offsetof(..., entries[allocCount]).
struct TLVTerminatorList
{
	uint32_t	allocCount;
	uint32_t	useCount;
	struct TLVTerminatorListEntry	entries[1];  // variable length
};


// pthread key whose destructor (tlv_finalize) runs the registered
// terminators when a thread exits.  Created in tlv_initializer(), before
// any per-image TLV keys, so destructors run before TLV deallocation.
static pthread_key_t tlv_terminators_key = 0;
379
380void _tlv_atexit(TermFunc func, void* objAddr)
381{
382 // NOTE: this does not need locks because it only operates on current thread data
383 struct TLVTerminatorList* list = (struct TLVTerminatorList*)pthread_getspecific(tlv_terminators_key);
384 if ( list == NULL ) {
385 // handle first allocation
386 list = (struct TLVTerminatorList*)malloc(offsetof(struct TLVTerminatorList, entries[1]));
387 list->allocCount = 1;
388 list->useCount = 1;
389 list->entries[0].termFunc = func;
390 list->entries[0].objAddr = objAddr;
391 pthread_setspecific(tlv_terminators_key, list);
392 }
393 else {
394 if ( list->allocCount == list->allocCount ) {
395 // handle resizing allocation
396 uint32_t newAllocCount = list->allocCount * 2;
397 uint32_t newAllocSize = offsetof(struct TLVTerminatorList, entries[newAllocCount]);
398 struct TLVTerminatorList* newlist = (struct TLVTerminatorList*)malloc(newAllocSize);
399 newlist->allocCount = newAllocCount;
400 newlist->useCount = list->useCount;
401 for(uint32_t i=0; i < list->useCount; ++i)
402 newlist->entries[i] = list->entries[i];
403 pthread_setspecific(tlv_terminators_key, newlist);
404 free(list);
405 list = newlist;
406 }
407 // handle appending new entry
408 list->entries[list->useCount].termFunc = func;
409 list->entries[list->useCount].objAddr = objAddr;
410 list->useCount += 1;
411 }
412}
413
414// called by pthreads when the current thread is going way and
415// _tlv_atexit() has been called on the thread.
416static void tlv_finalize(void* storage)
417{
418 struct TLVTerminatorList* list = (struct TLVTerminatorList*)storage;
419 for(uint32_t i=0; i < list->useCount; ++i) {
420 struct TLVTerminatorListEntry* entry = &list->entries[i];
421 if ( entry->termFunc != NULL ) {
422 (*entry->termFunc)(entry->objAddr);
423 }
424 }
425 free(storage);
426}
427
428
// One-time setup for thread-local-variable support, called during libdyld
// start up (before any TLVs are touched).
__attribute__((visibility("hidden")))
void tlv_initializer()
{
	// create pthread key to handle thread_local destructors
	// NOTE: this key must be allocated before any keys for TLV
	// so that _pthread_tsd_cleanup will run destructors before deallocation
	(void)pthread_key_create(&tlv_terminators_key, &tlv_finalize);

	// register with dyld for notification when images are loaded
	dyld_register_image_state_change_handler(dyld_image_state_bound, true, tlv_load_notification);
}
440
441
// linked images with TLV have references to this symbol, but it is never used at runtime
// (tlv_initialize_descriptors() replaces each descriptor's thunk with
// tlv_get_addr before first use); reaching this means descriptors were
// never initialized.
void _tlv_bootstrap()
{
	abort();
}
447
448
449// __i386__ || __x86_64__
450#else
451// !(__i386__ || __x86_64__)
452
453
// TLV state-change notifications are only implemented for i386/x86_64;
// on other architectures this is a no-op stub kept for link compatibility.
void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state, dyld_tlv_state_change_handler handler)
{
}
457
// No-op stub on architectures without TLV support: there is never any
// TLV storage to enumerate.
void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler)
{
}
461
// No-op stub on architectures without TLV support; keeps the libdyld
// start-up call site architecture-independent.
__attribute__((visibility("hidden")))
void tlv_initializer()
{
}
466
467
468// !(__i386__ || __x86_64__)
469#endif
470
471