/*
 * runtime.c
 * libclosure
 *
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LLVM_LICENSE_HEADER@
 */


#ifndef KERNEL

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <os/assumes.h>

#else /* !KERNEL */
#define TARGET_OS_WIN32 0

#include <libkern/Block_private.h>
__BEGIN_DECLS
#include <kern/kalloc.h>
__END_DECLS

static inline void *
malloc(size_t size)
{
	if (size == 0) {
		return NULL;
	}
	return kheap_alloc_tag_bt(KHEAP_DEFAULT, size,
	    (zalloc_flags_t) (Z_WAITOK | Z_ZERO), VM_KERN_MEMORY_LIBKERN);
}

static inline void
free(void *addr)
{
	kheap_free_addr(KHEAP_DEFAULT, addr);
}

#endif /* KERNEL */

#include <machine/atomic.h>
#include <string.h>
#include <stdint.h>
#ifndef os_assumes
#define os_assumes(_x) (_x)
#endif
#ifndef os_assert
#define os_assert(_x) assert(_x)
#endif

#if TARGET_OS_WIN32
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>
static __inline bool
OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
{
	// fixme barrier is overkill -- see objc-os.h
	long original = InterlockedCompareExchange(dst, newl, oldl);
	return original == oldl;
}

static __inline bool
OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
{
	// fixme barrier is overkill -- see objc-os.h
	int original = InterlockedCompareExchange(dst, newi, oldi);
	return original == oldi;
}
#else
#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) os_atomic_cmpxchg(_Ptr, _Old, _New, relaxed)
#endif


/*******************************************************************************
 * Internal Utilities
 ********************************************************************************/

static int32_t
latching_incr_int(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			return BLOCK_REFCOUNT_MASK;
		}
		if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
			return old_value + 2;
		}
	}
}

static bool
latching_incr_int_not_deallocating(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if (old_value & BLOCK_DEALLOCATING) {
			// if deallocating we can't do this
			return false;
		}
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			// if latched, we're leaking this block, and we succeed
			return true;
		}
		if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) {
			// otherwise, we must store a new retained value without the deallocating bit set
			return true;
		}
	}
}


// return should_deallocate?
static bool
latching_decr_int_should_deallocate(volatile int32_t *where)
{
	while (1) {
		int32_t old_value = *where;
		if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
			return false; // latched high
		}
		if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
			return false; // underflow, latch low
		}
		int32_t new_value = old_value - 2;
		bool result = false;
		if ((old_value & (BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING)) == 2) {
			new_value = old_value - 1;
			result = true;
		}
		if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
			return result;
		}
	}
}
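
// The three latching helpers above implement the saturating reference count
// kept in the low bits of a Block's (or Block_byref's) flags word. A short
// sketch of the encoding, assuming the upstream Block_private.h bit
// assignments (BLOCK_DEALLOCATING is the low bit, the count lives in
// BLOCK_REFCOUNT_MASK in increments of 2):
#if 0
//   flags & BLOCK_REFCOUNT_MASK == 2        one logical reference
//   flags & BLOCK_REFCOUNT_MASK == 4        two logical references
//   flags & BLOCK_REFCOUNT_MASK == BLOCK_REFCOUNT_MASK
//                                           latched: the count saturates and
//                                           the object is deliberately leaked
//
// latching_decr_int_should_deallocate() answers true exactly once: when the
// count is 2 and BLOCK_DEALLOCATING is clear, it subtracts 1 rather than 2,
// atomically replacing the final reference with the deallocating bit.
#endif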


/**************************************************************************
 * Framework callback functions and their default implementations.
 ***************************************************************************/
#if !TARGET_OS_WIN32
#pragma mark Framework Callback Routines
#endif
#if KERNEL
static inline void
_Block_retain_object(const void *ptr __unused)
{
}

static inline void
_Block_release_object(const void *ptr __unused)
{
}

static inline void
_Block_destructInstance(const void *aBlock __unused)
{
}

#else

static void
_Block_retain_object_default(const void *ptr __unused)
{
}

static void
_Block_release_object_default(const void *ptr __unused)
{
}

static void
_Block_destructInstance_default(const void *aBlock __unused)
{
}

static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default;


/**************************************************************************
 * Callback registration from ObjC runtime and CoreFoundation
 ***************************************************************************/

void
_Block_use_RR2(const Block_callbacks_RR *callbacks)
{
	_Block_retain_object = callbacks->retain;
	_Block_release_object = callbacks->release;
	_Block_destructInstance = callbacks->destructInstance;
}
#endif // !KERNEL

/****************************************************************************
 * Accessors for block descriptor fields
 *****************************************************************************/

template <class T>
static T *
unwrap_relative_pointer(int32_t &offset)
{
	if (offset == 0) {
		return nullptr;
	}

	uintptr_t base = (uintptr_t)&offset;
	uintptr_t extendedOffset = (uintptr_t)(intptr_t)offset;
	uintptr_t pointer = base + extendedOffset;
	return (T *)pointer;
}
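
// unwrap_relative_pointer() resolves a self-relative field: the 32-bit value
// is the signed distance from the field's own address to its target, which
// keeps small block descriptors position-independent and half the size of an
// absolute 64-bit pointer. A worked example with made-up addresses
// (illustrative assumption, not a real layout):
#if 0
// signature field at 0x100004000, signature string at 0x100003f80:
//   offset         = 0x100003f80 - 0x100004000 = -0x80  (emitted by the compiler)
//   base           = (uintptr_t)&offset        = 0x100004000
//   extendedOffset = (uintptr_t)(intptr_t)-0x80          (sign-extended)
//   pointer        = base + extendedOffset     = 0x100003f80
#endif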

#if 0
static struct Block_descriptor_2 *
_Block_descriptor_2(struct Block_layout *aBlock)
{
	uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
	desc += sizeof(struct Block_descriptor_1);
	return __IGNORE_WCASTALIGN((struct Block_descriptor_2 *)desc);
}
#endif

static struct Block_descriptor_3 *
_Block_descriptor_3(struct Block_layout *aBlock)
{
	uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock);
	desc += sizeof(struct Block_descriptor_1);
	if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
		desc += sizeof(struct Block_descriptor_2);
	}
	return __IGNORE_WCASTALIGN((struct Block_descriptor_3 *)desc);
}
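
// The accessors above walk a descriptor whose optional parts are laid out
// back to back after Block_descriptor_1. A sketch of the in-memory layout,
// assuming (as this code does) that descriptor 2 is present iff
// BLOCK_HAS_COPY_DISPOSE and descriptor 3 only when the block carries
// signature/layout information:
#if 0
struct Block_descriptor_in_memory {     // hypothetical, for illustration only
	struct Block_descriptor_1 d1;   // always present: reserved, size
	struct Block_descriptor_2 d2;   // iff BLOCK_HAS_COPY_DISPOSE: copy/dispose helpers
	struct Block_descriptor_3 d3;   // iff signature present: signature, layout strings
};
#endif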

static void
_Block_call_copy_helper(void *result, struct Block_layout *aBlock)
{
	if (auto *pFn = _Block_get_copy_function(aBlock)) {
		pFn(result, aBlock);
	}
}

static void
_Block_call_dispose_helper(struct Block_layout *aBlock)
{
	if (auto *pFn = _Block_get_dispose_function(aBlock)) {
		pFn(aBlock);
	}
}

/*******************************************************************************
 * Internal Support routines for copying
 ********************************************************************************/

#if !TARGET_OS_WIN32
#pragma mark Copy/Release support
#endif

// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
void *
_Block_copy(const void *arg)
{
	struct Block_layout *aBlock;

	if (!arg) {
		return NULL;
	}

	// The following would be better done as a switch statement
	aBlock = (struct Block_layout *)arg;
	if (aBlock->flags & BLOCK_NEEDS_FREE) {
		// latches on high
		latching_incr_int(&aBlock->flags);
		return aBlock;
	} else if (aBlock->flags & BLOCK_IS_GLOBAL) {
		return aBlock;
	} else {
		// It's a stack block. Make a copy.
		size_t size = Block_size(aBlock);
		struct Block_layout *result = (struct Block_layout *)malloc(size);
		if (!result) {
			return NULL;
		}
		memmove(result, aBlock, size); // bitcopy first
#if __has_feature(ptrauth_calls)
		// Resign the invoke pointer as it uses address authentication.
		result->invoke = aBlock->invoke;

#if __has_feature(ptrauth_signed_block_descriptors)
		uintptr_t oldDesc =
		    ptrauth_blend_discriminator(
			&aBlock->descriptor, _Block_descriptor_ptrauth_discriminator);
		uintptr_t newDesc =
		    ptrauth_blend_discriminator(
			&result->descriptor, _Block_descriptor_ptrauth_discriminator);

		result->descriptor =
		    ptrauth_auth_and_resign(aBlock->descriptor, ptrauth_key_asda, oldDesc,
		    ptrauth_key_asda, newDesc);
#endif
#endif

		// reset refcount
		result->flags &= ~(BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING); // XXX not needed
		result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1
		_Block_call_copy_helper(result, aBlock);
		// Set isa last so memory analysis tools see a fully-initialized object.
		result->isa = _NSConcreteMallocBlock;
		return result;
	}
}
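
// A minimal usage sketch of the three _Block_copy cases above, written in
// ordinary (userspace) block syntax purely for illustration; use() is a
// stand-in, not a real function:
#if 0
int captured = 7;
void (^stackBlock)(void) = ^{ use(captured); };  // stack block, captures a value

void *heap = _Block_copy(stackBlock);  // stack case: malloc'd copy, logical refcount 1
void *same = _Block_copy(heap);        // BLOCK_NEEDS_FREE case: same pointer, refcount bumped
void *glob = _Block_copy(^{ });        // capture-free blocks are BLOCK_IS_GLOBAL: returned as-is

_Block_release(same);
_Block_release(heap);                  // count hits zero: dispose helper runs, memory freed
#endif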


// Runtime entry points for maintaining the sharing knowledge of byref data blocks.

// A closure has been copied, and its fixup routine is asking us to fix up the reference to the shared byref data.
// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding pointer.
// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment and return it.
// Otherwise we need to copy it and update the stack forwarding pointer.
static struct Block_byref *
_Block_byref_copy(const void *arg)
{
	struct Block_byref *src = (struct Block_byref *)arg;

	if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
		// src points to stack
		struct Block_byref *copy = (struct Block_byref *)malloc(src->size);
		copy->isa = NULL;
		// byref value 4 is logical refcount of 2: one for caller, one for stack
		copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4;
		copy->forwarding = copy; // patch heap copy to point to itself
		src->forwarding = copy; // patch stack to point to heap copy
		copy->size = src->size;

		if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
			// Trust copy helper to copy everything of interest
			// If more than one field shows up in a byref block this is wrong XXX
			struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src + 1);
			struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy + 1);
			copy2->byref_keep = src2->byref_keep;
			copy2->byref_destroy = src2->byref_destroy;

			if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
				struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2 + 1);
				struct Block_byref_3 *copy3 = (struct Block_byref_3 *)(copy2 + 1);
				copy3->layout = src3->layout;
			}

			(*src2->byref_keep)(copy, src);
		} else {
			// Bitwise copy.
			// This copy includes Block_byref_3, if any.
			memmove(copy + 1, src + 1, src->size - sizeof(*src));
		}
	}
	// already copied to heap
	else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
		latching_incr_int(&src->forwarding->flags);
	}

	return src->forwarding;
}

static void
_Block_byref_release(const void *arg)
{
	struct Block_byref *byref = (struct Block_byref *)arg;

	// dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
	byref = byref->forwarding;

	if (byref->flags & BLOCK_BYREF_NEEDS_FREE) {
		__assert_only int32_t refcount = byref->flags & BLOCK_REFCOUNT_MASK;
		os_assert(refcount);
		if (latching_decr_int_should_deallocate(&byref->flags)) {
			if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
				struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref + 1);
				(*byref2->byref_destroy)(byref);
			}
			free(byref);
		}
	}
}
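
// The effect of _Block_byref_copy on a hypothetical `__block int x`: the one
// heap copy becomes the forwarding target of both the stack container and the
// copied block, so either side sees the other's writes.
#if 0
// before:  stack_byref.forwarding == &stack_byref
// after:   stack_byref.forwarding == heap_copy
//          heap_copy->forwarding  == heap_copy
//          heap_copy->flags       == src->flags | BLOCK_BYREF_NEEDS_FREE | 4
//
// Every access therefore goes through the forwarding pointer, and the byref
// begins its heap life with a logical refcount of 2 (stack frame + block).
#endif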


/************************************************************
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 ***********************************************************/

#if !TARGET_OS_WIN32
#pragma mark SPI/API
#endif


// API entry point to release a copied Block
void
_Block_release(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	if (!aBlock) {
		return;
	}
	if (aBlock->flags & BLOCK_IS_GLOBAL) {
		return;
	}
	if (!(aBlock->flags & BLOCK_NEEDS_FREE)) {
		return;
	}

	if (latching_decr_int_should_deallocate(&aBlock->flags)) {
		_Block_call_dispose_helper(aBlock);
		_Block_destructInstance(aBlock);
		free(aBlock);
	}
}

bool
_Block_tryRetain(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	return latching_incr_int_not_deallocating(&aBlock->flags);
}

bool
_Block_isDeallocating(const void *arg)
{
	struct Block_layout *aBlock = (struct Block_layout *)arg;
	return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
}
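
// _Block_tryRetain() and _Block_isDeallocating() let a weak-reference
// implementation (the ObjC runtime, in userspace builds) retain a block only
// if its destruction has not already begun. A hedged sketch of that pattern;
// loadWeakBlock() is hypothetical, not part of this file:
#if 0
static void *
loadWeakBlock(void *candidate)
{
	// Fails once BLOCK_DEALLOCATING has been set by the final
	// _Block_release, at which point the weak load must yield NULL.
	if (candidate != NULL && _Block_tryRetain(candidate)) {
		return candidate;
	}
	return NULL;
}
#endif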


/************************************************************
 *
 * SPI used by other layers
 *
 ***********************************************************/

size_t
Block_size(void *aBlock)
{
	auto *layout = (Block_layout *)aBlock;
	void *desc = _Block_get_descriptor(layout);
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		return ((Block_descriptor_small *)desc)->size;
	}
	return ((Block_descriptor_1 *)desc)->size;
}

bool
_Block_use_stret(void *aBlock)
{
	struct Block_layout *layout = (struct Block_layout *)aBlock;

	int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
	return (layout->flags & requiredFlags) == requiredFlags;
}

// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
bool
_Block_has_signature(void *aBlock)
{
	return _Block_signature(aBlock) ? true : false;
}

const char *
_Block_signature(void *aBlock)
{
	struct Block_layout *layout = (struct Block_layout *)aBlock;
	if (!(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		return unwrap_relative_pointer<const char>(bds->signature);
	}

	struct Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
	return desc3->signature;
}

const char *
_Block_layout(void *aBlock)
{
	// Don't return extended layout to callers expecting old GC layout
	Block_layout *layout = (Block_layout *)aBlock;
	if ((layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
	    !(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		return unwrap_relative_pointer<const char>(bds->layout);
	}

	Block_descriptor_3 *desc = _Block_descriptor_3(layout);
	return desc->layout;
}

const char *
_Block_extended_layout(void *aBlock)
{
	// Don't return old GC layout to callers expecting extended layout
	Block_layout *layout = (Block_layout *)aBlock;
	if (!(layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) ||
	    !(layout->flags & BLOCK_HAS_SIGNATURE)) {
		return nullptr;
	}

	const char *extLayout;
	if (layout->flags & BLOCK_SMALL_DESCRIPTOR) {
		auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout);
		if (layout->flags & BLOCK_INLINE_LAYOUT_STRING) {
			extLayout = (const char *)(uintptr_t)bds->layout;
		} else {
			extLayout = unwrap_relative_pointer<const char>(bds->layout);
		}
	} else {
		Block_descriptor_3 *desc3 = _Block_descriptor_3(layout);
		extLayout = desc3->layout;
	}

	// Return empty string (all non-object bytes) instead of NULL
	// so callers can distinguish "empty layout" from "no layout".
	if (!extLayout) {
		extLayout = "";
	}
	return extLayout;
}

#if !TARGET_OS_WIN32
#pragma mark Compiler SPI entry points
#endif

/*******************************************************
 *
 * Entry points used by the compiler - the real API!
 *
 *
 * A Block can reference four different kinds of things that require help when the Block is copied to the heap.
 * 1) C++ stack based objects
 * 2) References to Objective-C objects
 * 3) Other Blocks
 * 4) __block variables
 *
 * In these cases helper functions are synthesized by the compiler for use in Block_copy and
 * Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++
 * const copy constructor for C++ stack based objects and for the rest calls into the runtime
 * support function _Block_object_assign. The dispose helper has a call to the C++ destructor
 * for case 1 and a call into _Block_object_dispose for the rest.
 *
 * The flags parameter of _Block_object_assign and _Block_object_dispose is set to
 * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
 * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
 * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
 * If the __block variable is marked weak, the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).
 *
 * So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
 *
 * When a __block variable is either a C++ object, an Objective-C object, or another Block, the
 * compiler also generates copy/dispose helper functions. Similarly to the Block copy helper,
 * the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy
 * constructor (not a const one though!) and the dispose helper will do the destructor. And
 * similarly the helpers will call into the same two support functions, with the same values for
 * objects and Blocks, with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
 *
 * So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks
 * respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and 128 always ORed in,
 * for the following set of possibilities:
 * __block id                   128+3       (0x83)
 * __block (^Block)             128+7       (0x87)
 * __weak __block id            128+3+16    (0x93)
 * __weak __block (^Block)      128+7+16    (0x97)
 *
 ********************************************************/
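
// A hedged sketch of the helpers the comment above describes, for a block
// capturing an Objective-C object and a __block int. The struct and helper
// names are illustrative assumptions, not actual compiler output:
#if 0
// source:  id obj = ...; __block int counter = 0; Block_copy(^{ [obj go]; counter++; });
struct my_block_literal {
	struct Block_layout layout;
	id obj;                        // retained capture
	struct Block_byref *counter;   // __block container
};

static void
my_block_copy_helper(void *dst, void *src)
{
	struct my_block_literal *d = (struct my_block_literal *)dst;
	struct my_block_literal *s = (struct my_block_literal *)src;
	_Block_object_assign(&d->obj, s->obj, BLOCK_FIELD_IS_OBJECT);         // flags == 3
	_Block_object_assign(&d->counter, s->counter, BLOCK_FIELD_IS_BYREF);  // flags == 8
}

static void
my_block_dispose_helper(void *src)
{
	struct my_block_literal *s = (struct my_block_literal *)src;
	_Block_object_dispose(s->obj, BLOCK_FIELD_IS_OBJECT);
	_Block_object_dispose(s->counter, BLOCK_FIELD_IS_BYREF);
}
#endif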

//
// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
// to do the assignment.
//
void
_Block_object_assign(void *destArg, const void *object, const int flags)
{
	const void **dest = (const void **)destArg;
	switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
	case BLOCK_FIELD_IS_OBJECT:
		/*******
		 * id object = ...;
		 * [^{ object; } copy];
		 ********/

		_Block_retain_object(object);
		*dest = object;
		break;

	case BLOCK_FIELD_IS_BLOCK:
		/*******
		 * void (^object)(void) = ...;
		 * [^{ object; } copy];
		 ********/

		*dest = _Block_copy(object);
		break;

	case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
	case BLOCK_FIELD_IS_BYREF:
		/*******
		 * // copy the onstack __block container to the heap
		 * // Note this __weak is old GC-weak/MRC-unretained.
		 * // ARC-style __weak is handled by the copy helper directly.
		 * __block ... x;
		 * __weak __block ... x;
		 * [^{ x; } copy];
		 ********/

		*dest = _Block_byref_copy(object);
		break;

	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
		/*******
		 * // copy the actual field held in the __block container
		 * // Note this is MRC unretained __block only.
		 * // ARC retained __block is handled by the copy helper directly.
		 * __block id object;
		 * __block void (^object)(void);
		 * [^{ object; } copy];
		 ********/

		*dest = object;
		break;

	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
		/*******
		 * // copy the actual field held in the __block container
		 * // Note this __weak is old GC-weak/MRC-unretained.
		 * // ARC-style __weak is handled by the copy helper directly.
		 * __weak __block id object;
		 * __weak __block void (^object)(void);
		 * [^{ object; } copy];
		 ********/

		*dest = object;
		break;

	default:
		break;
	}
}

// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
// to help dispose of the contents
void
_Block_object_dispose(const void *object, const int flags)
{
	switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
	case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
	case BLOCK_FIELD_IS_BYREF:
		// get rid of the __block data structure held in a Block
		_Block_byref_release(object);
		break;
	case BLOCK_FIELD_IS_BLOCK:
		_Block_release(object);
		break;
	case BLOCK_FIELD_IS_OBJECT:
		_Block_release_object(object);
		break;
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
	case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
		break;
	default:
		break;
	}
}
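
// Note that the four BLOCK_BYREF_CALLER dispose cases above intentionally do
// nothing: their _Block_object_assign counterparts store the pointer without
// retaining it (MRC-unretained semantics), so there is nothing to release.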


// Workaround for <rdar://26015603> dylib with no __DATA segment fails to rebase
__attribute__((used))
static int let_there_be_data = 42;