From: Apple
Date: Thu, 28 Jan 2010 22:35:33 +0000 (+0000)
Subject: Libc-594.9.1.tar.gz
X-Git-Tag: mac-os-x-1064^0
X-Git-Url: https://git.saurik.com/apple/libc.git/commitdiff_plain/511daa4c8e95ca3a9f4a918942d9350855df1da0

Libc-594.9.1.tar.gz
---
diff --git a/arm/string/Makefile.inc b/arm/string/Makefile.inc
index 73dcb7f..9e1f368 100644
--- a/arm/string/Makefile.inc
+++ b/arm/string/Makefile.inc
@@ -9,11 +9,8 @@ MDSRCS += \
 	bzero.s \
 	ffs.s \
 	memcmp.s \
+	memset_pattern.s \
 	strcmp.s \
 	strlen.s
 
-.if defined(FEATURE_ARM_ARCH_6)
-MDSRCS += memset_pattern.s
-.endif
-
 SUPPRESSSRCS += bcmp.c memcpy.c memmove.c memset.c strlen.c
diff --git a/arm/string/memset_pattern.s b/arm/string/memset_pattern.s
index 59fa868..1d0eb4f 100755
--- a/arm/string/memset_pattern.s
+++ b/arm/string/memset_pattern.s
@@ -22,9 +22,6 @@
  */
 
 #include
-
-#if defined(_ARM_ARCH_6)
-
 #include
 
 /*
@@ -37,8 +34,8 @@
  * The memset() is implemented in the bzero.s file.
  *
  * This is a reasonably well optimized version of memset_pattern* routines
- * implemented for the ARM9 and ARM11 processors using the ARMv6 instruction
- * set. These routines use the ARM's core registers.
+ * implemented for ARM processors using the ARMv4 and later instruction sets.
+ * These routines use the ARM's core registers.
  *
  * The algorithm is to align the destination pointer on a 16 byte boundary
  * and then blast data 64 bytes at a time, in two stores of 32 bytes per loop.
@@ -80,7 +77,9 @@ _memset_pattern4:
 	/* move 'len' into r1, get 4-byte pattern in r2 */
 	mov	r6, r2			/* temporarily move 'len' in to r6 */
-	ldr	r2, [r1]		/* load 4-byte pattern into r2 */
+	bl	L_GetPatternWord	/* get unaligned pattern word in r5 */
+	mov	r2, r5			/* move pattern word into r2 */
+	mov	r0, r12			/* r0 was clobbered - restore it */
 	mov	r1, r6			/* move 'len' from r6 to r1 */
 	mov	r3, r2			/* copy 4-byte pattern into r3, r4 and r5 registers */
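The removed ldr assumed the pattern pointer could be loaded as a whole word, which tolerates misalignment only on ARMv6 and later; the new L_GetPatternWord helper assembles the word one byte at a time, which is legal on any ARM. A minimal C sketch of the same technique (the function name is illustrative, not part of the source):

    #include <stdint.h>

    /* Build a 32-bit little-endian word from a possibly unaligned
     * pointer, one byte at a time -- the ldrb/orr sequence that
     * L_GetPatternWord implements in assembly. */
    static uint32_t get_unaligned_word(const unsigned char *p)
    {
        return (uint32_t)p[0]
             | ((uint32_t)p[1] << 8)
             | ((uint32_t)p[2] << 16)
             | ((uint32_t)p[3] << 24);
    }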
@@ -108,7 +107,8 @@ L_Short:
 	cmp	r2, #0			/* more bytes left? */
 	bne	L_Bytewise
 	ldm	sp!, {r8, r10-r11}	/* restores registers from stack */
-	ldm	sp!, {r4-r7, pc}	/* restore & return from subroutine */
+	ldm	sp!, {r4-r7, lr}	/* restore & return from subroutine */
+	bx	lr
 
 /* 'len' is long enough to justify aligning the destination pointer           */
 /*                                                                            */
@@ -176,7 +176,8 @@ L_Store15BytesAndRotatePattern:
 	mrs	r11, cpsr		/* copy cpsr in to r11 */
 	subs	r1, r1, r11, lsr #28
 	ldmeq	sp!, {r8, r10-r11}	/* restores registers from stack */
-	ldmeq	sp!, {r4-r7, pc}	/* restore & return from subroutine */
+	ldmeq	sp!, {r4-r7, lr}	/* restore & return from subroutine */
+	bxeq	lr
 
 /* By the time we reach here, we are 16-byte aligned and r2-r5 contains */
 /* rotated pattern. Now lets make sure we are 32-byte aligned. */
@@ -211,7 +212,8 @@ L_Loop64:
 	/* return if 'len' is zero */
 	adds	r1, r1, #64		/* readjust length; previously subtracted extra 64 */
 	ldmeq	sp!, {r8, r10-r11}	/* restores registers from stack */
-	ldmeq	sp!, {r4-r7, pc}	/* restore & return from subroutine */
+	ldmeq	sp!, {r4-r7, lr}	/* restore & return from subroutine */
+	bxeq	lr
 
 L_AlignedLessThan64:
 	/* do we have 16 or more bytes left */
@@ -220,7 +222,8 @@ L_AlignedLessThan64:
 	subsge	r1, r1, #16
 	bgt	L_AlignedLessThan64
 	ldmeq	sp!, {r8, r10-r11}	/* restores registers from stack */
-	ldmeq	sp!, {r4-r7, pc}	/* restore & return from subroutine */
+	ldmeq	sp!, {r4-r7, lr}	/* restore & return from subroutine */
+	bxeq	lr
 
 L_AlignedLessThan16:
 	/* store last up-to 15 bytes */
@@ -240,7 +243,8 @@ L_AlignedLessThan16:
 	strbvs	r2, [r12], #1		/* v is set, store 1 byte */
 
 	ldm	sp!, {r8, r10-r11}	/* restores registers from stack */
-	ldm	sp!, {r4-r7, pc}	/* restore & return from subroutine */
+	ldm	sp!, {r4-r7, lr}	/* restore & return from subroutine */
+	bx	lr
 
 /*----------------------------------------------------------------------------*/
 /* void     memset_pattern8(void *ptr, const void *pattern8, size_t len);      */
@@ -274,8 +278,11 @@ _memset_pattern8:
 	/* move 'len' into r1, get 8-byte pattern in r2-r3 */
 	mov	r6, r2			/* temporarily move 'len' in to r6 */
-	ldr	r2, [r1], #4		/* load 8-byte pattern into r2-r3 */
-	ldr	r3, [r1], #4
+	bl	L_GetPatternWord	/* get unaligned pattern word in r5 */
+	mov	r2, r5			/* move pattern word into r2 */
+	bl	L_GetPatternWord
+	mov	r3, r5
+	mov	r0, r12			/* r0 was clobbered - restore it */
 	mov	r1, r6			/* move 'len' from r6 to r1 */
 	mov	r4, r2			/* copy 8-byte pattern into r4-r5 registers */
@@ -315,13 +322,30 @@ _memset_pattern16:
 	/* move 'len' into r1, get 16-byte pattern in r2-r5 */
 	mov	r6, r2			/* temporarily move 'len' in to r6 */
-	ldr	r2, [r1], #4		/* load 16-byte pattern into r2-r5 */
-	ldr	r3, [r1], #4
-	ldr	r4, [r1], #4
-	ldr	r5, [r1], #4
+	bl	L_GetPatternWord	/* get unaligned pattern word in r5 */
+	mov	r2, r5			/* move pattern word into r2 */
+	bl	L_GetPatternWord
+	mov	r3, r5
+	bl	L_GetPatternWord
+	mov	r4, r5
+	bl	L_GetPatternWord
+	mov	r0, r12			/* r0 was clobbered - restore it */
 	mov	r1, r6			/* move 'len' from r6 to r1 */
 
 	b	L_NotShort		/* yes */
 
-#endif /* _ARM_ARCH_6 */
+/*----------------------------------------------------------------------------*/
+/* Get an unaligned word at r1, returning it in r5.                            */
+/* Increments r1 by 4, clobbers r0.                                            */
+/* This is tailored to fit the register usage by the call sites.               */
+/*----------------------------------------------------------------------------*/
+L_GetPatternWord:
+	ldrb	r5, [r1], #1		/* get the 1st byte at r1 */
+	ldrb	r0, [r1], #1		/* get the 2nd byte at r1 */
+	orr	r5, r5, r0, lsl #8	/* move into bits 15:8 */
+	ldrb	r0, [r1], #1		/* get the 3rd byte */
+	orr	r5, r5, r0, lsl #16	/* bits 23:16 */
+	ldrb	r0, [r1], #1		/* get the 4th byte */
+	orr	r5, r5, r0, lsl #24	/* bits 31:24 */
+	bx	lr
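Taken together, the memset_pattern* routines follow the strategy described in the file comment: widen the pattern to a 16-byte block, align, then store big chunks. A rough C model under simplifying assumptions (no destination alignment or pattern rotation, which the real assembly does handle; memset_pattern4_model is an illustrative name):

    #include <stddef.h>
    #include <string.h>

    /* Rough C model of the assembly's approach: replicate the 4-byte
     * pattern into a 16-byte block, store whole blocks, then finish
     * the up-to-15-byte tail.  Because the pattern period (4) divides
     * the block size (16), the tail can reuse the block's prefix. */
    static void memset_pattern4_model(void *ptr, const void *pattern4, size_t len)
    {
        unsigned char block[16];
        unsigned char *dst = ptr;

        for (int i = 0; i < 16; i += 4)      /* replicate pattern 4x */
            memcpy(block + i, pattern4, 4);

        while (len >= 16) {                  /* blast 16 bytes at a time */
            memcpy(dst, block, 16);
            dst += 16;
            len -= 16;
        }
        if (len)                             /* short tail */
            memcpy(dst, block, len);
    }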
diff --git a/darwin/_dirhelper.c b/darwin/_dirhelper.c
index c25abd1..0a07a5a 100644
--- a/darwin/_dirhelper.c
+++ b/darwin/_dirhelper.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include
 
 #include "dirhelper.h"
 #include "dirhelper_priv.h"
@@ -104,8 +105,12 @@ encode_uuid_uid(uuid_t uuid, uid_t uid, char *str)
 char *
 __user_local_dirname(uid_t uid, dirhelper_which_t which, char *path, size_t pathlen)
 {
+#if TARGET_OS_EMBEDDED
+	char *tmpdir;
+#else
 	uuid_t uuid;
 	char str[ENCODEDSIZE + 1];
+#endif
 	int res;
 
 	if(which < 0 || which > DIRHELPER_USER_LOCAL_LAST) {
@@ -113,6 +118,15 @@ __user_local_dirname(uid_t uid, dirhelper_which_t which, char *path, size_t path
 		return NULL;
 	}
 
+#if TARGET_OS_EMBEDDED
+	tmpdir = getenv("TMPDIR");
+	if(!tmpdir) {
+		errno = EINVAL;
+		return NULL;
+	}
+
+	res = snprintf(path, pathlen, "%s/%s", tmpdir, subdirs[which]);
+#else
 	res = mbr_uid_to_uuid(uid, uuid);
 	if(res != 0) {
 		errno = res;
@@ -129,6 +143,7 @@ __user_local_dirname(uid_t uid, dirhelper_which_t which, char *path, size_t path
 	res = snprintf(path, pathlen, "%s%.*s/%s/%s",
 	    VAR_FOLDERS_PATH, BUCKETLEN, str, str, subdirs[which]);
+#endif
 	if(res >= pathlen) {
 		errno = EINVAL;
 		return NULL; /* buffer too small */
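On embedded targets the per-user directory is now rooted at $TMPDIR instead of the /var/folders bucket scheme. A standalone sketch of that branch's logic, with "TemporaryItems" standing in for the real subdirs[which] table entry (both the function name and the subdirectory string are hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Resolve a per-user subdirectory under $TMPDIR, mirroring the
     * embedded branch above: fail with EINVAL if TMPDIR is unset or
     * the buffer is too small. */
    static char *embedded_dirname(char *path, size_t pathlen)
    {
        const char *tmpdir = getenv("TMPDIR");
        if (!tmpdir) {
            errno = EINVAL;     /* no TMPDIR: same failure mode as the patch */
            return NULL;
        }
        int res = snprintf(path, pathlen, "%s/%s", tmpdir, "TemporaryItems");
        if (res < 0 || (size_t)res >= pathlen) {
            errno = EINVAL;     /* buffer too small */
            return NULL;
        }
        return path;
    }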
diff --git a/gen/asl.c b/gen/asl.c
index 8f1f34d..86fd838 100644
--- a/gen/asl.c
+++ b/gen/asl.c
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include "asl_core.h"
 #include
 
 #define streq(A, B) (strcmp(A, B) == 0)
@@ -221,24 +222,11 @@ _asl_notify_close()
 	pthread_mutex_unlock(&_asl_global.lock);
 }
 
-aslclient
-asl_open(const char *ident, const char *facility, uint32_t opts)
+static void
+_asl_get_global_server_port()
 {
-	char *name, *x;
-	asl_client_t *asl;
 	kern_return_t kstatus;
 
-	asl = (asl_client_t *)calloc(1, sizeof(asl_client_t));
-	if (asl == NULL)
-	{
-		errno = ENOMEM;
-		return NULL;
-	}
-
-	asl->options = opts;
-
-	asl->sock = -1;
-
 	pthread_mutex_lock(&(_asl_global.port_lock));
 
 	if (_asl_global.server_port == MACH_PORT_NULL)
@@ -255,6 +243,41 @@ asl_open(const char *ident, const char *facility, uint32_t opts)
 	}
 
 	pthread_mutex_unlock(&(_asl_global.port_lock));
+}
+
+static void
+_asl_release_global_server_port()
+{
+	pthread_mutex_lock(&(_asl_global.port_lock));
+
+	if (_asl_global.port_count > 0) _asl_global.port_count--;
+	if (_asl_global.port_count == 0)
+	{
+		mach_port_deallocate(mach_task_self(), _asl_global.server_port);
+		_asl_global.server_port = MACH_PORT_NULL;
+	}
+
+	pthread_mutex_unlock(&(_asl_global.port_lock));
+}
+
+aslclient
+asl_open(const char *ident, const char *facility, uint32_t opts)
+{
+	char *name, *x;
+	asl_client_t *asl;
+
+	asl = (asl_client_t *)calloc(1, sizeof(asl_client_t));
+	if (asl == NULL)
+	{
+		errno = ENOMEM;
+		return NULL;
+	}
+
+	asl->options = opts;
+
+	asl->sock = -1;
+
+	_asl_get_global_server_port();
 
 	asl->pid = getpid();
 	asl->uid = getuid();
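The refactor introduces a refcount discipline for the global server port: any caller may acquire it, and the last release deallocates the send right. A generic sketch of that pattern (g_lock/g_port/g_count and the lookup placeholder are illustrative; the real state lives in _asl_global and the right comes from a bootstrap lookup):

    #include <pthread.h>
    #include <mach/mach.h>

    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
    static mach_port_t g_port = MACH_PORT_NULL;
    static unsigned g_count = 0;

    static void acquire_port(void)
    {
        pthread_mutex_lock(&g_lock);
        if (g_port == MACH_PORT_NULL) {
            /* look up the service here (bootstrap_look_up in the real code) */
        }
        if (g_port != MACH_PORT_NULL) g_count++;    /* one more user */
        pthread_mutex_unlock(&g_lock);
    }

    static void release_port(void)
    {
        pthread_mutex_lock(&g_lock);
        if (g_count > 0 && --g_count == 0) {
            /* last user returns the send right */
            mach_port_deallocate(mach_task_self(), g_port);
            g_port = MACH_PORT_NULL;
        }
        pthread_mutex_unlock(&g_lock);
    }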
@@ -3382,8 +3405,14 @@ asl_unset(aslmsg a, const char *key)
  * returns: a set of messages that can be iterated over using aslresp_next(),
  * and the values can be retrieved using aslresp_get.
  */
-aslresponse
-asl_search(aslclient ac, aslmsg a)
+
+/*
+ * This routine searches the ASL datastore on disk (/var/log/asl).
+ * It is called by asl_search if syslogd is not running or if syslogd
+ * indicates that an in-memory store is not being used.
+ */
+static aslresponse
+_asl_search_store(aslclient ac, aslmsg a)
 {
 	asl_search_result_t query, *out;
 	asl_msg_t *q, *qlist[1];
@@ -3412,7 +3441,7 @@ asl_search(aslclient ac, aslmsg a)
 	out = NULL;
 	last_id = 0;
 
-	qlist[0] = a;
+	qlist[0] = (asl_msg_t *)a;
 	memset(&query, 0, sizeof(asl_search_result_t));
 	query.count = 1;
 	query.msg = qlist;
@@ -3423,6 +3452,185 @@ asl_search(aslclient ac, aslmsg a)
 	return out;
 }
 
+static uint32_t
+_asl_search_concat_results(asl_search_result_t *batch, asl_search_result_t **out)
+{
+	uint32_t i, j;
+
+	if (out == NULL) return ASL_STATUS_FAILED;
+
+	/* nothing to do if batch is NULL or contains no messages */
+	if (batch == NULL) return 0;
+	if (batch->count == 0)
+	{
+		aslresponse_free(batch);
+		return 0;
+	}
+
+	if (*out == NULL) *out = (asl_search_result_t *)calloc(1, sizeof(asl_search_result_t));
+	if (*out == NULL)
+	{
+		aslresponse_free(batch);
+		return ASL_STATUS_FAILED;
+	}
+
+	if ((*out)->count == 0)
+	{
+		(*out)->msg = (asl_msg_t **)calloc(batch->count, sizeof(asl_msg_t *));
+	}
+	else
+	{
+		(*out)->msg = (asl_msg_t **)reallocf((*out)->msg, ((*out)->count + batch->count) * sizeof(asl_msg_t *));
+	}
+
+	if ((*out)->msg == NULL)
+	{
+		aslresponse_free(batch);
+		free(*out);
+		*out = NULL;
+		return ASL_STATUS_FAILED;
+	}
+
+	for (i = 0, j = (*out)->count; i < batch->count; i++, j++) (*out)->msg[j] = batch->msg[i];
+
+	(*out)->count += batch->count;
+	free(batch->msg);
+	free(batch);
+	return ASL_STATUS_OK;
+}
+
+static aslresponse
+_asl_search_memory(aslclient ac, aslmsg a)
+{
+	asl_search_result_t *batch, *out;
+	char *qstr, *str, *res;
+	uint32_t i, len, reslen, status;
+	uint64_t cmax, qmin;
+	kern_return_t kstatus;
+	security_token_t sec;
+	caddr_t vmstr;
+
+	if (a == NULL) return 0;
+
+	_asl_get_global_server_port();
+	if (_asl_global.server_port == MACH_PORT_NULL) return NULL;
+
+	len = 0;
+	qstr = asl_msg_to_string((asl_msg_t *)a, &len);
+
+	str = NULL;
+	if (qstr == NULL)
+	{
+		asprintf(&str, "0\n");
+		len = 3;
+	}
+	else
+	{
+		asprintf(&str, "1\n%s\n", qstr);
+		len += 4;
+		free(qstr);
+	}
+
+	if (str == NULL)
+	{
+		_asl_release_global_server_port();
+		return NULL;
+	}
+
+	/*
+	 * Fetch a batch of results each time through the loop.
+	 * Fetching small batches reduces the load on syslogd.
+	 */
+	out = NULL;
+	qmin = 0;
+	cmax = 0;
+
+	forever
+	{
+		res = NULL;
+		reslen = 0;
+		sec.val[0] = -1;
+		sec.val[1] = -1;
+		status = ASL_STATUS_OK;
+
+		kstatus = vm_allocate(mach_task_self(), (vm_address_t *)&vmstr, len, TRUE);
+		if (kstatus != KERN_SUCCESS)
+		{
+			_asl_release_global_server_port();
+			return NULL;
+		}
+
+		memmove(vmstr, str, len);
+
+		status = 0;
+		kstatus = _asl_server_query(_asl_global.server_port, vmstr, len, qmin, FETCH_BATCH, 0, (caddr_t *)&res, &reslen, &cmax, (int *)&status, &sec);
+		if (kstatus != KERN_SUCCESS) break;
+		if (res == NULL) break;
+
+		batch = asl_list_from_string(res);
+		vm_deallocate(mach_task_self(), (vm_address_t)res, reslen);
+
+		status = _asl_search_concat_results(batch, &out);
+		if (status != ASL_STATUS_OK) break;
+		if (i < FETCH_BATCH) break;
+
+		if (cmax > qmin) qmin = cmax + 1;
+	}
+
+	free(str);
+
+	_asl_release_global_server_port();
+	return out;
+}
+
+int
+asl_store_location()
+{
+	kern_return_t kstatus;
+	char *res;
+	uint32_t reslen, status;
+	uint64_t cmax;
+	security_token_t sec;
+
+	_asl_get_global_server_port();
+	if (_asl_global.server_port == MACH_PORT_NULL) return ASL_STORE_LOCATION_FILE;
+
+	res = NULL;
+	reslen = 0;
+	cmax = 0;
+	sec.val[0] = -1;
+	sec.val[1] = -1;
+	status = ASL_STATUS_OK;
+
+	kstatus = _asl_server_query(_asl_global.server_port, NULL, 0, 0, -1, 0, (caddr_t *)&res, &reslen, &cmax, (int *)&status, &sec);
+	_asl_release_global_server_port();
+
+	/* res should never be returned, but just to be certain we don't leak VM ... */
+	if (res != NULL) vm_deallocate(mach_task_self(), (vm_address_t)res, reslen);
+
+	if (kstatus != KERN_SUCCESS) return ASL_STORE_LOCATION_FILE;
+
+	if (status == ASL_STATUS_OK) return ASL_STORE_LOCATION_MEMORY;
+	return ASL_STORE_LOCATION_FILE;
+}
+
+aslresponse
+asl_search(aslclient ac, aslmsg a)
+{
+	int where;
+	asl_search_result_t *out;
+
+	/* prevents fetching and destroying the send right twice if nobody has already looked up the port */
+	_asl_get_global_server_port();
+
+	where = asl_store_location();
+	if (where == ASL_STORE_LOCATION_FILE) out = _asl_search_store(ac, a);
+	else out = _asl_search_memory(ac, a);
+
+	_asl_release_global_server_port();
+	return out;
+}
+
 /*
  * aslresponse_next: Iterate over responses returned from asl_search()
  * a: a response returned from asl_search();
diff --git a/gen/asl_private.h b/gen/asl_private.h
index dfcce84..a9c699b 100644
--- a/gen/asl_private.h
+++ b/gen/asl_private.h
@@ -59,6 +59,9 @@
 #define ASL_OPT_IGNORE "ignore"
 #define ASL_OPT_STORE "store"
 
+#define ASL_STORE_LOCATION_FILE 0
+#define ASL_STORE_LOCATION_MEMORY 1
+
 typedef struct __aslclient
 {
 	uint32_t options;
@@ -103,6 +106,7 @@ __BEGIN_DECLS
 int asl_add_output(aslclient asl, int fd, const char *msg_fmt, const char *time_fmt, uint32_t text_encoding);
 int asl_remove_output(aslclient asl, int fd);
 char *asl_format_message(aslmsg msg, const char *msg_fmt, const char *time_fmt, uint32_t text_encoding, uint32_t *outlen);
+int asl_store_location();
 __END_DECLS
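For callers the public entry point is unchanged. A usage sketch built only from the documented asl(3) API (the dump_sender wrapper is illustrative):

    #include <asl.h>
    #include <stdio.h>

    /* Query the log store for messages from one sender and print them.
     * asl_search() now transparently picks the file store or syslogd's
     * in-memory store via asl_store_location(). */
    static void dump_sender(const char *sender)
    {
        aslmsg q = asl_new(ASL_TYPE_QUERY);
        if (q == NULL) return;

        asl_set_query(q, ASL_KEY_SENDER, sender, ASL_QUERY_OP_EQUAL);

        aslresponse r = asl_search(NULL, q);
        aslmsg m;
        while (r != NULL && (m = aslresponse_next(r)) != NULL)
        {
            const char *msg = asl_get(m, ASL_KEY_MSG);
            if (msg != NULL) printf("%s\n", msg);
        }

        aslresponse_free(r);
        asl_free(q);
    }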
diff --git a/gen/magazine_malloc.c b/gen/magazine_malloc.c
index a1fb6f0..a7c8700 100644
--- a/gen/magazine_malloc.c
+++ b/gen/magazine_malloc.c
@@ -37,6 +37,8 @@
     -I/System/Library/Frameworks/System.framework/PrivateHeaders/ -funit-at-a-time \
     -dynamiclib -Wall -arch x86_64 -arch i386 -arch ppc */
 
+#include
+
 #include "scalable_malloc.h"
 #include "malloc_printf.h"
 #include "_simple.h"
@@ -451,7 +453,11 @@ typedef struct {
 	boolean_t did_madvise_reusable;
 } large_entry_t;
 
+#if !TARGET_OS_EMBEDDED
 #define LARGE_CACHE 1
+#else
+#define LARGE_CACHE 0
+#endif
 #if !LARGE_CACHE
 #warning LARGE_CACHE turned off
 #endif
@@ -560,6 +566,10 @@ typedef struct szone_s {	// vm_allocate()'d, so page-aligned to begin with.
 	int	num_tiny_magazines_mask_shift;
 	magazine_t	*tiny_magazines;	// array of per-processor magazines
 
+#if TARGET_OS_EMBEDDED
+	uintptr_t last_tiny_advise;
+#endif
+
 	/* Regions for small objects */
 	pthread_lock_t	small_regions_lock CACHE_ALIGN;
 	size_t	num_small_regions;
@@ -574,6 +584,10 @@ typedef struct szone_s {	// vm_allocate()'d, so page-aligned to begin with.
 	int	num_small_magazines_mask_shift;
 	magazine_t	*small_magazines;	// array of per-processor magazines
 
+#if TARGET_OS_EMBEDDED
+	uintptr_t last_small_advise;
+#endif
+
 	/* large objects: all the rest */
 	pthread_lock_t	large_szone_lock CACHE_ALIGN;	// One customer at a time for large
 	unsigned	num_large_objects_in_use;
@@ -623,7 +637,11 @@ static void protect(void *address, size_t size, unsigned protection, unsigned d
 static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label);
 static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
+#if TARGET_OS_EMBEDDED
+static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last);
+#else
 static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi);
+#endif
 static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);
 
 static INLINE mag_index_t mag_get_thread_index(szone_t *szone) ALWAYSINLINE;
@@ -1002,7 +1020,11 @@ deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
 }
 
 static int
+#if TARGET_OS_EMBEDDED
+madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last)
+#else
 madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi)
+#endif
 {
 	if (pgHi > pgLo) {
 		size_t len = pgHi - pgLo;
@@ -1011,8 +1033,22 @@ madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi)
 		if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE)
 			memset((void *)pgLo, 0xed, len); // Scribble on MADV_FREEd memory
 #endif
+
+#if TARGET_OS_EMBEDDED
+		if (last) {
+			if (*last == pgLo)
+				return 0;
+
+			*last = pgLo;
+		}
+#endif
+
 		MAGMALLOC_MADVFREEREGION((void *)szone, (void *)r, (void *)pgLo, len); // DTrace USDT Probe
+#if TARGET_OS_EMBEDDED
+		if (-1 == madvise((void *)pgLo, len, MADV_FREE)) {
+#else
 		if (-1 == madvise((void *)pgLo, len, MADV_FREE_REUSABLE)) {
+#endif
 			/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
 #if DEBUG_MALLOC
 			szone_error(szone, 1, "madvise_free_range madvise(..., MADV_FREE_REUSABLE) failed", (void *)pgLo, NULL);
 #endif
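The embedded variant makes two changes visible here: it uses plain MADV_FREE instead of Darwin's MADV_FREE_REUSABLE, and it uses the new 'last' parameter to skip a redundant syscall when the same page run is advised twice in a row. A minimal sketch of that logic (advise_free is an illustrative name):

    #include <stdint.h>
    #include <sys/mman.h>

    /* Advise the kernel that [pgLo, pgHi) is free, deduplicating
     * against the previously advised start page when 'last' is given. */
    static int advise_free(uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last)
    {
        if (pgHi <= pgLo)
            return 0;
        if (last != NULL) {
            if (*last == pgLo)
                return 0;           /* already advised this run; skip */
            *last = pgLo;
        }
        return madvise((void *)pgLo, pgHi - pgLo, MADV_FREE);
    }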
@@ -1969,7 +2005,11 @@ tiny_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r) {
 			uintptr_t pgHi = trunc_page(start + TINY_REGION_SIZE - sizeof(msize_t));
 
 			if (pgLo < pgHi) {
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, r, pgLo, pgHi, NULL);
+#else
 				madvise_free_range(szone, r, pgLo, pgHi);
+#endif
 				did_advise = TRUE;
 			}
 			break;
@@ -1986,7 +2026,11 @@ tiny_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r) {
 			uintptr_t pgHi = trunc_page(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
 
 			if (pgLo < pgHi) {
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, r, pgLo, pgHi, NULL);
+#else
 				madvise_free_range(szone, r, pgLo, pgHi);
+#endif
 				did_advise = TRUE;
 			}
 		}
@@ -2165,6 +2209,7 @@ tiny_get_region_from_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t
 
 	MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (int)BYTES_USED_FOR_TINY_REGION(sparse_region)); // DTrace USDT Probe
 
+#if !TARGET_OS_EMBEDDED
 	if (-1 == madvise((void *)sparse_region, TINY_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) {
 		/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
 #if DEBUG_MALLOC
@@ -2172,6 +2217,7 @@ tiny_get_region_from_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t
 #endif
 		return 0;
 	}
+#endif
 
 	return 1;
 }
@@ -2287,6 +2333,7 @@ tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_inde
 		size_t bytes_used = node->bytes_used - original_size;
 		node->bytes_used = bytes_used;
 
+#if !TARGET_OS_EMBEDDED // Always madvise for embedded platforms
 		/* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
 		if (szone->num_tiny_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
 			/* NOTHING */
@@ -2311,6 +2358,7 @@ tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_inde
 			tiny_free_do_recirc_to_depot(szone, tiny_mag_ptr, mag_index);
 		} else {
+#endif
 			// Freed to Depot. N.B. Lock on tiny_magazines[DEPOT_MAGAZINE_INDEX] is already held
 			uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t);
 			uintptr_t round_safe = round_page(safe_ptr);
@@ -2325,21 +2373,34 @@ tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_inde
 				uintptr_t rnd_safe_follow =
 					round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
 
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent), &szone->last_tiny_advise);
+#else
 				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent));
+#endif
 			} else if (did_prepend) { // Coalesced preceding with original_ptr
 				uintptr_t trunc_safe_prev = trunc_page((uintptr_t)original_ptr - sizeof(msize_t));
 
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent, &szone->last_tiny_advise);
+#else
 				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent);
+#endif
 			} else if (did_append) { // Coalesced original_ptr with following
 				uintptr_t rnd_safe_follow =
 					round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
 
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent), &szone->last_tiny_advise);
+#else
 				madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent));
+#endif
 			} else { // Isolated free cannot exceed 496 bytes, thus round_safe == trunc_extent, and so never get here.
 				/* madvise_free_range(szone, region, round_safe, trunc_extent); */
 			}
 		}
 
+#if !TARGET_OS_EMBEDDED
 		if (0 < bytes_used) {
 			/* Depot'd region is still live. Leave it in place on the Depot's recirculation list
 			   so as to avoid thrashing between the Depot's free list and a magazines's free list
@@ -2351,6 +2412,7 @@ tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_inde
 			tiny_free_try_depot_unmap_no_lock(szone, depot_ptr, node);	// FIXME: depot_ptr is simply tiny_mag_ptr?
 		}
 	}
+#endif
 }
 
 // Allocates from the last region or a freshly allocated region
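Every madvise call above operates on a page range derived from round_page/trunc_page, so pages that still hold live data or allocator metadata at either edge of the freed block are never advised away. A sketch of that range computation, with local helpers standing in for the kernel's round_page()/trunc_page() macros:

    #include <stdint.h>
    #include <stddef.h>
    #include <unistd.h>

    static uintptr_t round_pg(uintptr_t a, uintptr_t pg) { return (a + pg - 1) & ~(pg - 1); }
    static uintptr_t trunc_pg(uintptr_t a, uintptr_t pg) { return a & ~(pg - 1); }

    /* Compute the madvisable page run wholly inside [block, block+size). */
    static int madvisable_range(uintptr_t block, size_t size,
                                uintptr_t *pgLo, uintptr_t *pgHi)
    {
        uintptr_t pg = (uintptr_t)getpagesize();
        *pgLo = round_pg(block, pg);         /* first boundary at/after the start */
        *pgHi = trunc_pg(block + size, pg);  /* last boundary at/before the end */
        return *pgLo < *pgHi;                /* anything left to advise? */
    }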
@@ -3539,7 +3601,11 @@ small_free_scan_depot_madvise_free(szone_t *szone, magazine_t *depot_ptr, region
 			uintptr_t pgHi = trunc_page(start + SMALL_REGION_SIZE - sizeof(msize_t));
 
 			if (pgLo < pgHi) {
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, r, pgLo, pgHi, NULL);
+#else
 				madvise_free_range(szone, r, pgLo, pgHi);
+#endif
 				did_advise = TRUE;
 			}
 			break;
@@ -3556,7 +3622,11 @@ small_free_scan_depot_madvise_free(szone_t *szone, magazine_t *depot_ptr, region
 			uintptr_t pgHi = trunc_page(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
 
 			if (pgLo < pgHi) {
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, r, pgLo, pgHi, NULL);
+#else
 				madvise_free_range(szone, r, pgLo, pgHi);
+#endif
 				did_advise = TRUE;
 			}
 		}
@@ -3735,6 +3805,7 @@ small_get_region_from_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index
 
 	MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (int)BYTES_USED_FOR_SMALL_REGION(sparse_region)); // DTrace USDT Probe
 
+#if !TARGET_OS_EMBEDDED
 	if (-1 == madvise((void *)sparse_region, SMALL_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) {
 		/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
 #if DEBUG_MALLOC
@@ -3742,6 +3813,7 @@ small_get_region_from_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index
 #endif
 		return 0;
 	}
+#endif
 
 	return 1;
 }
@@ -3825,6 +3897,7 @@ small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_in
 		size_t bytes_used = node->bytes_used - original_size;
 		node->bytes_used = bytes_used;
 
+#if !TARGET_OS_EMBEDDED // Always madvise for embedded platforms
 		/* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
 		if (szone->num_small_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
 			/* NOTHING */
@@ -3850,6 +3923,7 @@ small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_in
 			small_free_do_recirc_to_depot(szone, small_mag_ptr, mag_index);
 		} else {
+#endif
 			// Freed to Depot. N.B. Lock on small_magazines[DEPOT_MAGAZINE_INDEX] is already held
 			uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t);
 			uintptr_t round_safe = round_page(safe_ptr);
@@ -3864,20 +3938,37 @@ small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_in
 				uintptr_t rnd_safe_follow =
 					round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
 
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent), &szone->last_small_advise);
+#else
 				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent));
+#endif
 			} else if (did_prepend) { // Coalesced preceding with original_ptr
 				uintptr_t trunc_safe_prev = trunc_page((uintptr_t)original_ptr - sizeof(msize_t));
 
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent, &szone->last_small_advise);
+#else
 				madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent);
+#endif
 			} else if (did_append) { // Coalesced original_ptr with following
 				uintptr_t rnd_safe_follow =
 					round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
 
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent), &szone->last_small_advise);
+#else
 				madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent));
+#endif
 			} else // Isolated free
+#if TARGET_OS_EMBEDDED
+				madvise_free_range(szone, region, round_safe, trunc_extent, &szone->last_small_advise);
+#else
 				madvise_free_range(szone, region, round_safe, trunc_extent);
+#endif
 		}
 
+#if !TARGET_OS_EMBEDDED
 		if (0 < bytes_used) {
 			/* Depot'd region is still live. Leave it in place on the Depot's recirculation list
 			   so as to avoid thrashing between the Depot's free list and a magazines's free list
@@ -3889,6 +3980,7 @@ small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_in
 			small_free_try_depot_unmap_no_lock(szone, depot_ptr, node);
 		}
 	}
+#endif
 }
 
 // Allocates from the last region or a freshly allocated region
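The desktop path pairs two advice flags: a region parked in the depot is marked MADV_FREE_REUSABLE, and the same pages are marked MADV_FREE_REUSE when the region goes back into service. Embedded builds compile out the REUSE step, which is why both madvise calls above are guarded. A sketch of the pairing (function names are illustrative):

    #include <sys/mman.h>

    /* Idle region: tell the VM these pages are reclaimable. */
    static int region_park(void *region, size_t len)
    {
        return madvise(region, len, MADV_FREE_REUSABLE);
    }

    /* Region handed back to a magazine: pages are in use again. */
    static int region_reuse(void *region, size_t len)
    {
        return madvise(region, len, MADV_FREE_REUSE);
    }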
@@ -6658,6 +6750,9 @@ create_scalable_zone(size_t initial_size, unsigned debug_flags)
 	// Reduce i by 1 to obtain a mask covering [0 .. (num_tiny_magazines - 1)]
 	szone->num_tiny_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid)
 
+#if TARGET_OS_EMBEDDED
+	szone->last_tiny_advise = 0;
+#endif
+
 	// Init the tiny_magazine locks
 	LOCK_INIT(szone->tiny_regions_lock);
@@ -6692,6 +6787,9 @@ create_scalable_zone(size_t initial_size, unsigned debug_flags)
 	// Reduce i by 1 to obtain a mask covering [0 .. (num_small_magazines - 1)]
 	szone->num_small_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid)
 
+#if TARGET_OS_EMBEDDED
+	szone->last_small_advise = 0;
+#endif
+
 	// Init the small_magazine locks
 	LOCK_INIT(szone->small_regions_lock);
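The "mask covering [0 .. n-1]" comments describe the usual power-of-two trick: round the magazine count up to a power of two, subtract one, and thread-to-magazine mapping becomes a single AND instead of a modulo. A sketch under that assumption (the hash of the thread handle is purely illustrative; the real mag_get_thread_index derives its input differently):

    #include <pthread.h>
    #include <stdint.h>

    /* num_magazines_pow2 must be a power of two, e.g. 8 -> mask 0b0111. */
    static unsigned magazine_index(unsigned num_magazines_pow2, pthread_t self)
    {
        unsigned mask = num_magazines_pow2 - 1;
        return (unsigned)((uintptr_t)self >> 12) & mask;
    }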
diff --git a/include/libkern/OSThermalNotification.h b/include/libkern/OSThermalNotification.h
index da74a59..ab61e5a 100644
--- a/include/libkern/OSThermalNotification.h
+++ b/include/libkern/OSThermalNotification.h
@@ -40,8 +40,12 @@ __BEGIN_DECLS
 typedef enum {
 	OSThermalNotificationLevelAny = -1,
 	OSThermalNotificationLevelNormal = 0,
+	OSThermalNotificationLevel70PercentTorch = 1,
 	OSThermalNotificationLevel70PercentBacklight = 3,
+	OSThermalNotificationLevel50PercentTorch = 3,
 	OSThermalNotificationLevel50PercentBacklight = 5,
+	OSThermalNotificationLevelDisableTorch = 5,
+	OSThermalNotificationLevel25PercentBacklight = 7,
 	OSThermalNotificationLevelAppTerminate = 12,
 	OSThermalNotificationLevelDeviceRestart = 16
 } OSThermalNotificationLevel;
diff --git a/stdlib/psort-fbsd.c b/stdlib/psort-fbsd.c
index 45e58f5..a95ce60 100644
--- a/stdlib/psort-fbsd.c
+++ b/stdlib/psort-fbsd.c
@@ -394,7 +394,7 @@ psort(void *a, size_t n, size_t es, cmp_t *cmp)
 #endif
 	shared.cmp = cmp;
 	shared.es = es;
-	shared.queue = dispatch_get_concurrent_queue(0);
+	shared.queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
 	shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
 	shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
 	args->a = a;
diff --git a/stdlib/psort.c.patch b/stdlib/psort.c.patch
index 76b3cb9..b718226 100644
--- a/stdlib/psort.c.patch
+++ b/stdlib/psort.c.patch
@@ -283,7 +283,7 @@
 +#endif
 +	shared.cmp = cmp;
 +	shared.es = es;
-+	shared.queue = dispatch_get_concurrent_queue(0);
++	shared.queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
 +	shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
 +	shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
 +	args->a = a;
diff --git a/stdlib/psort_b-fbsd.c b/stdlib/psort_b-fbsd.c
index 45e58f5..a95ce60 100644
--- a/stdlib/psort_b-fbsd.c
+++ b/stdlib/psort_b-fbsd.c
@@ -394,7 +394,7 @@ psort(void *a, size_t n, size_t es, cmp_t *cmp)
 #endif
 	shared.cmp = cmp;
 	shared.es = es;
-	shared.queue = dispatch_get_concurrent_queue(0);
+	shared.queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
 	shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
 	shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
 	args->a = a;
diff --git a/stdlib/psort_r-fbsd.c b/stdlib/psort_r-fbsd.c
index 45e58f5..a95ce60 100644
--- a/stdlib/psort_r-fbsd.c
+++ b/stdlib/psort_r-fbsd.c
@@ -394,7 +394,7 @@ psort(void *a, size_t n, size_t es, cmp_t *cmp)
 #endif
 	shared.cmp = cmp;
 	shared.es = es;
-	shared.queue = dispatch_get_concurrent_queue(0);
+	shared.queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
 	shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
 	shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
 	args->a = a;
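The psort diffs all make the same substitution: dispatch_get_concurrent_queue() was a pre-release libdispatch call, and the shipping replacement is dispatch_get_global_queue(). A minimal usage sketch of the new call:

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        /* Same queue the patched psort uses for its work items. */
        dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
        dispatch_group_t g = dispatch_group_create();

        dispatch_group_async(g, q, ^{
            printf("ran on the default-priority global queue\n");
        });

        dispatch_group_wait(g, DISPATCH_TIME_FOREVER);  /* block until done */
        dispatch_release(g);
        return 0;
    }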