1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
3 * Copyright (c) 2010 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdbool.h>
#include <pthread.h>
#include <Block.h>
#include <malloc/malloc.h>
#include <mach-o/loader.h>
#include <libkern/OSAtomic.h>

#include "dyld_priv.h"
// Select pointer-size-appropriate mach-o structure aliases so the rest of
// this file can be written once for both 32-bit and 64-bit processes.
// Without the conditional, both sets of typedefs would be emitted
// unconditionally, redefining macho_header et al. and LC_SEGMENT_COMMAND.
#if __LP64__
	typedef struct mach_header_64		macho_header;
	#define LC_SEGMENT_COMMAND			LC_SEGMENT_64
	typedef struct segment_command_64	macho_segment_command;
	typedef struct section_64			macho_section;
#else
	typedef struct mach_header			macho_header;
	#define LC_SEGMENT_COMMAND			LC_SEGMENT
	typedef struct segment_command		macho_segment_command;
	typedef struct section				macho_section;
#endif
// Thread-local-variable section types and the header flag may be absent
// from older SDK headers; provide the fixed ABI values as fallbacks.
// Each #ifndef needs a matching #endif to keep the preprocessor balanced.
#ifndef S_THREAD_LOCAL_REGULAR
	#define S_THREAD_LOCAL_REGULAR					0x11
#endif

#ifndef S_THREAD_LOCAL_ZEROFILL
	#define S_THREAD_LOCAL_ZEROFILL					0x12
#endif

#ifndef S_THREAD_LOCAL_VARIABLES
	#define S_THREAD_LOCAL_VARIABLES				0x13
#endif

#ifndef S_THREAD_LOCAL_VARIABLE_POINTERS
	#define S_THREAD_LOCAL_VARIABLE_POINTERS		0x14
#endif

#ifndef S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
	#define S_THREAD_LOCAL_INIT_FUNCTION_POINTERS	0x15
#endif

#ifndef MH_HAS_TLV_DESCRIPTORS
	#define MH_HAS_TLV_DESCRIPTORS					0x800000
#endif
76 #if __i386__ || __x86_64__
78 typedef struct TLVHandler
{
79 struct TLVHandler
*next
;
80 dyld_tlv_state_change_handler handler
;
81 enum dyld_tlv_states state
;
84 // lock-free prepend-only linked list
85 static TLVHandler
* volatile tlv_handlers
= NULL
;
// Descriptor emitted by the compiler/linker for each thread-local variable.
// Compiler-generated code calls through `thunk` to get the address of the
// current thread's copy of the variable.
// NOTE(review): only the `thunk` field is visible in this extraction; the
// `key`/`offset` fields below follow the documented dyld TLV ABI (and
// tlv_initialize_descriptors writes d->key) — confirm the exact layout
// against the original header before relying on it.
struct TLVDescriptor
{
	void*			(*thunk)(struct TLVDescriptor*);
	unsigned long	key;
	unsigned long	offset;
};
typedef struct TLVDescriptor  TLVDescriptor;

// implemented in assembly
extern void* tlv_get_addr(TLVDescriptor*);
// Record mapping the pthread key allocated for an image's TLVs back to
// that image's mach_header (the header is needed to find the TLV template
// sections when the key's storage is lazily allocated).
struct TLVImageInfo
{
	pthread_key_t				key;
	const struct mach_header*	mh;
};
typedef struct TLVImageInfo		TLVImageInfo;

// Growable array of live images that contain TLVs.
// All three variables are guarded by tlv_live_image_lock.
static TLVImageInfo*	tlv_live_images = NULL;
static unsigned int		tlv_live_image_alloc_count = 0;
static unsigned int		tlv_live_image_used_count = 0;
static pthread_mutex_t	tlv_live_image_lock = PTHREAD_MUTEX_INITIALIZER;
112 static void tlv_set_key_for_image(const struct mach_header
* mh
, pthread_key_t key
)
114 pthread_mutex_lock(&tlv_live_image_lock
);
115 if ( tlv_live_image_used_count
== tlv_live_image_alloc_count
) {
116 unsigned int newCount
= (tlv_live_images
== NULL
) ? 8 : 2*tlv_live_image_alloc_count
;
117 struct TLVImageInfo
* newBuffer
= malloc(sizeof(TLVImageInfo
)*newCount
);
118 if ( tlv_live_images
!= NULL
) {
119 memcpy(newBuffer
, tlv_live_images
, sizeof(TLVImageInfo
)*tlv_live_image_used_count
);
120 free(tlv_live_images
);
122 tlv_live_images
= newBuffer
;
123 tlv_live_image_alloc_count
= newCount
;
125 tlv_live_images
[tlv_live_image_used_count
].key
= key
;
126 tlv_live_images
[tlv_live_image_used_count
].mh
= mh
;
127 ++tlv_live_image_used_count
;
128 pthread_mutex_unlock(&tlv_live_image_lock
);
131 static const struct mach_header
* tlv_get_image_for_key(pthread_key_t key
)
133 const struct mach_header
* result
= NULL
;
134 pthread_mutex_lock(&tlv_live_image_lock
);
135 for(unsigned int i
=0; i
< tlv_live_image_used_count
; ++i
) {
136 if ( tlv_live_images
[i
].key
== key
) {
137 result
= tlv_live_images
[i
].mh
;
141 pthread_mutex_unlock(&tlv_live_image_lock
);
147 tlv_notify(enum dyld_tlv_states state
, void *buffer
)
149 if (!tlv_handlers
) return;
151 // Always use malloc_size() to ensure allocated and deallocated states
152 // send the same size. tlv_free() doesn't have anything else recorded.
153 dyld_tlv_info info
= { sizeof(info
), buffer
, malloc_size(buffer
) };
155 for (TLVHandler
*h
= tlv_handlers
; h
!= NULL
; h
= h
->next
) {
156 if (h
->state
== state
&& h
->handler
) {
157 h
->handler(h
->state
, &info
);
163 // called lazily when TLV is first accessed
164 __attribute__((visibility("hidden")))
165 void* tlv_allocate_and_initialize_for_key(pthread_key_t key
)
167 const struct mach_header
* mh
= tlv_get_image_for_key(key
);
168 // first pass, find size and template
169 uint8_t* start
= NULL
;
172 bool slideComputed
= false;
173 bool hasInitializers
= false;
174 const uint32_t cmd_count
= mh
->ncmds
;
175 const struct load_command
* const cmds
= (struct load_command
*)(((uint8_t*)mh
) + sizeof(macho_header
));
176 const struct load_command
* cmd
= cmds
;
177 for (uint32_t i
= 0; i
< cmd_count
; ++i
) {
178 if ( cmd
->cmd
== LC_SEGMENT_COMMAND
) {
179 const macho_segment_command
* seg
= (macho_segment_command
*)cmd
;
180 if ( !slideComputed
&& (seg
->filesize
!= 0) ) {
181 slide
= (uintptr_t)mh
- seg
->vmaddr
;
182 slideComputed
= true;
184 const macho_section
* const sectionsStart
= (macho_section
*)((char*)seg
+ sizeof(macho_segment_command
));
185 const macho_section
* const sectionsEnd
= §ionsStart
[seg
->nsects
];
186 for (const macho_section
* sect
=sectionsStart
; sect
< sectionsEnd
; ++sect
) {
187 switch ( sect
->flags
& SECTION_TYPE
) {
188 case S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
:
189 hasInitializers
= true;
191 case S_THREAD_LOCAL_ZEROFILL
:
192 case S_THREAD_LOCAL_REGULAR
:
193 if ( start
== NULL
) {
194 // first of N contiguous TLV template sections, record as if this was only section
195 start
= (uint8_t*)(sect
->addr
+ slide
);
199 // non-first of N contiguous TLV template sections, accumlate values
200 const uint8_t* newEnd
= (uint8_t*)(sect
->addr
+ slide
+ sect
->size
);
201 size
= newEnd
- start
;
207 cmd
= (const struct load_command
*)(((char*)cmd
)+cmd
->cmdsize
);
210 // allocate buffer and fill with template
211 void* buffer
= malloc(size
);
212 memcpy(buffer
, start
, size
);
214 // set this thread's value for key to be the new buffer.
215 pthread_setspecific(key
, buffer
);
217 // send tlv state notifications
218 tlv_notify(dyld_tlv_state_allocated
, buffer
);
220 // second pass, run initializers
221 if ( hasInitializers
) {
223 for (uint32_t i
= 0; i
< cmd_count
; ++i
) {
224 if ( cmd
->cmd
== LC_SEGMENT_COMMAND
) {
225 const macho_segment_command
* seg
= (macho_segment_command
*)cmd
;
226 const macho_section
* const sectionsStart
= (macho_section
*)((char*)seg
+ sizeof(macho_segment_command
));
227 const macho_section
* const sectionsEnd
= §ionsStart
[seg
->nsects
];
228 for (const macho_section
* sect
=sectionsStart
; sect
< sectionsEnd
; ++sect
) {
229 if ( (sect
->flags
& SECTION_TYPE
) == S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
) {
230 typedef void (*InitFunc
)(void);
231 InitFunc
* funcs
= (InitFunc
*)(sect
->addr
+ slide
);
232 const uint32_t count
= sect
->size
/ sizeof(uintptr_t);
233 for (uint32_t i
=count
; i
> 0; --i
) {
234 InitFunc func
= funcs
[i
-1];
240 cmd
= (const struct load_command
*)(((char*)cmd
)+cmd
->cmdsize
);
247 // pthread destructor for TLV storage
249 tlv_free(void *storage
)
251 tlv_notify(dyld_tlv_state_deallocated
, storage
);
256 // called when image is loaded
257 static void tlv_initialize_descriptors(const struct mach_header
* mh
)
259 pthread_key_t key
= 0;
261 bool slideComputed
= false;
262 const uint32_t cmd_count
= mh
->ncmds
;
263 const struct load_command
* const cmds
= (struct load_command
*)(((uint8_t*)mh
) + sizeof(macho_header
));
264 const struct load_command
* cmd
= cmds
;
265 for (uint32_t i
= 0; i
< cmd_count
; ++i
) {
266 if ( cmd
->cmd
== LC_SEGMENT_COMMAND
) {
267 const macho_segment_command
* seg
= (macho_segment_command
*)cmd
;
268 if ( !slideComputed
&& (seg
->filesize
!= 0) ) {
269 slide
= (uintptr_t)mh
- seg
->vmaddr
;
270 slideComputed
= true;
272 const macho_section
* const sectionsStart
= (macho_section
*)((char*)seg
+ sizeof(macho_segment_command
));
273 const macho_section
* const sectionsEnd
= §ionsStart
[seg
->nsects
];
274 for (const macho_section
* sect
=sectionsStart
; sect
< sectionsEnd
; ++sect
) {
275 if ( (sect
->flags
& SECTION_TYPE
) == S_THREAD_LOCAL_VARIABLES
) {
276 if ( sect
->size
!= 0 ) {
277 // allocate pthread key when we first discover this image has TLVs
279 int result
= pthread_key_create(&key
, &tlv_free
);
282 tlv_set_key_for_image(mh
, key
);
284 // initialize each descriptor
285 TLVDescriptor
* start
= (TLVDescriptor
*)(sect
->addr
+ slide
);
286 TLVDescriptor
* end
= (TLVDescriptor
*)(sect
->addr
+ sect
->size
+ slide
);
287 for (TLVDescriptor
* d
=start
; d
< end
; ++d
) {
288 d
->thunk
= tlv_get_addr
;
290 //d->offset = d->offset; // offset unchanged
296 cmd
= (const struct load_command
*)(((char*)cmd
)+cmd
->cmdsize
);
300 // called by dyld when a image is loaded
301 static const char* tlv_load_notification(enum dyld_image_states state
, uint32_t infoCount
, const struct dyld_image_info info
[])
303 // this is called on all images, even those without TLVs, so we want
304 // this to be fast. The linker sets MH_HAS_TLV_DESCRIPTORS so we don't
305 // have to search images just to find the don't have TLVs.
306 for (uint32_t i
=0; i
< infoCount
; ++i
) {
307 if ( info
[i
].imageLoadAddress
->flags
& MH_HAS_TLV_DESCRIPTORS
)
308 tlv_initialize_descriptors(info
[i
].imageLoadAddress
);
314 void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state
, dyld_tlv_state_change_handler handler
)
316 TLVHandler
*h
= malloc(sizeof(TLVHandler
));
318 h
->handler
= Block_copy(handler
);
324 } while (! OSAtomicCompareAndSwapPtrBarrier(old
, h
, (void * volatile *)&tlv_handlers
));
328 void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler
)
330 pthread_mutex_lock(&tlv_live_image_lock
);
331 unsigned int count
= tlv_live_image_used_count
;
333 for (unsigned int i
= 0; i
< count
; ++i
) {
334 list
[i
] = pthread_getspecific(tlv_live_images
[i
].key
);
336 pthread_mutex_unlock(&tlv_live_image_lock
);
338 for (unsigned int i
= 0; i
< count
; ++i
) {
340 dyld_tlv_info info
= { sizeof(info
), list
[i
], malloc_size(list
[i
]) };
341 handler(dyld_tlv_state_allocated
, &info
);
//
// thread_local terminators
//
// C++0x allows thread_local C++ objects which have constructors run
// on the thread before any use of the object and the object's destructor
// is run on the thread when the thread terminates.
//
// To support this libdyld gets a pthread key early in process start up and
// uses tlv_finalize as the key's destructor function. This key must be
// allocated before any keys for TLV storage because when a thread is
// terminated, the pthread package runs the destructor function on each
// key's storage values in key allocation order. Since we want C++ objects
// to be destructed before they are deallocated, we need the destructor
// key to come before the deallocation key.
//

typedef void (*TermFunc)(void*);

// One registered thread-local destructor: the function and the object it
// should be applied to.
struct TLVTerminatorListEntry
{
    TermFunc    termFunc;
    void        *objAddr;
};

// Per-thread growable list of registered terminators (stored as the value
// of tlv_terminators_key, so no locking is needed).
struct TLVTerminatorList
{
    uint32_t                        allocCount;
    uint32_t                        useCount;
    struct TLVTerminatorListEntry   entries[1];  // variable length
};

static pthread_key_t tlv_terminators_key = 0;
380 void _tlv_atexit(TermFunc func
, void* objAddr
)
382 // NOTE: this does not need locks because it only operates on current thread data
383 struct TLVTerminatorList
* list
= (struct TLVTerminatorList
*)pthread_getspecific(tlv_terminators_key
);
384 if ( list
== NULL
) {
385 // handle first allocation
386 list
= (struct TLVTerminatorList
*)malloc(offsetof(struct TLVTerminatorList
, entries
[1]));
387 list
->allocCount
= 1;
389 list
->entries
[0].termFunc
= func
;
390 list
->entries
[0].objAddr
= objAddr
;
391 pthread_setspecific(tlv_terminators_key
, list
);
394 if ( list
->allocCount
== list
->allocCount
) {
395 // handle resizing allocation
396 uint32_t newAllocCount
= list
->allocCount
* 2;
397 uint32_t newAllocSize
= offsetof(struct TLVTerminatorList
, entries
[newAllocCount
]);
398 struct TLVTerminatorList
* newlist
= (struct TLVTerminatorList
*)malloc(newAllocSize
);
399 newlist
->allocCount
= newAllocCount
;
400 newlist
->useCount
= list
->useCount
;
401 for(uint32_t i
=0; i
< list
->useCount
; ++i
)
402 newlist
->entries
[i
] = list
->entries
[i
];
403 pthread_setspecific(tlv_terminators_key
, newlist
);
407 // handle appending new entry
408 list
->entries
[list
->useCount
].termFunc
= func
;
409 list
->entries
[list
->useCount
].objAddr
= objAddr
;
414 // called by pthreads when the current thread is going way and
415 // _tlv_atexit() has been called on the thread.
416 static void tlv_finalize(void* storage
)
418 struct TLVTerminatorList
* list
= (struct TLVTerminatorList
*)storage
;
419 for(uint32_t i
=0; i
< list
->useCount
; ++i
) {
420 struct TLVTerminatorListEntry
* entry
= &list
->entries
[i
];
421 if ( entry
->termFunc
!= NULL
) {
422 (*entry
->termFunc
)(entry
->objAddr
);
429 __attribute__((visibility("hidden")))
430 void tlv_initializer()
432 // create pthread key to handle thread_local destructors
433 // NOTE: this key must be allocated before any keys for TLV
434 // so that _pthread_tsd_cleanup will run destructors before deallocation
435 (void)pthread_key_create(&tlv_terminators_key
, &tlv_finalize
);
437 // register with dyld for notification when images are loaded
438 dyld_register_image_state_change_handler(dyld_image_state_bound
, true, tlv_load_notification
);
442 // linked images with TLV have references to this symbol, but it is never used at runtime
443 void _tlv_bootstrap()
449 // __i386__ || __x86_64__
451 // !(__i386__ || __x86_64__)
454 void dyld_register_tlv_state_change_handler(enum dyld_tlv_states state
, dyld_tlv_state_change_handler handler
)
458 void dyld_enumerate_tlv_storage(dyld_tlv_state_change_handler handler
)
462 __attribute__((visibility("hidden")))
463 void tlv_initializer()
468 // !(__i386__ || __x86_64__)