bzero.s \
ffs.s \
memcmp.s \
+ memset_pattern.s \
strcmp.s \
strlen.s
-.if defined(FEATURE_ARM_ARCH_6)
-MDSRCS += memset_pattern.s
-.endif
-
SUPPRESSSRCS += bcmp.c memcpy.c memmove.c memset.c strlen.c
*/
#include <arm/arch.h>
-
-#if defined(_ARM_ARCH_6)
-
#include <mach/machine/asm.h>
/*
* memset() itself is implemented in bzero.s.
*
* This is a reasonably well optimized version of memset_pattern* routines
- * implemented for the ARM9 and ARM11 processors using the ARMv6 instruction
- * set. These routines use the ARM's core registers.
+ * implemented for ARM processors using the ARMv4 and later instruction sets.
+ * These routines use the ARM's core registers.
*
* The algorithm is to align the destination pointer on a 16-byte boundary
* and then blast data 64 bytes at a time, in two stores of 32 bytes per loop.
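For reference, the semantics the routine implements can be modeled in a few lines of C (a sketch only; the function name is invented here). The pattern phase is relative to the start of the buffer, which is why the assembly must rotate the pattern after aligning the destination:

    #include <stddef.h>

    /* Minimal model of memset_pattern4: fill the buffer with the 4-byte
     * pattern repeated from the start of the buffer. The shipping routine
     * produces the same result using 16-byte alignment and two 32-byte
     * stores per loop, as described above. */
    static void memset_pattern4_model(void *dst, const void *pattern4, size_t len)
    {
        const unsigned char *pat = (const unsigned char *)pattern4;
        unsigned char *p = (unsigned char *)dst;
        size_t i;

        for (i = 0; i < len; i++)
            p[i] = pat[i % 4];
    }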
/* move 'len' into r1, get 4-byte pattern in r2 */
mov r6, r2 /* temporarily move 'len' into r6 */
- ldr r2, [r1]/* load 4-byte pattern into r2 */
+ bl L_GetPatternWord /* get unaligned pattern word in r5 */
+ mov r2, r5 /* move pattern word into r2 */
+ mov r0, r12 /* r0 was clobbered - restore it */
mov r1, r6 /* move 'len' from r6 to r1 */
mov r3, r2 /* copy 4-byte pattern into r3, r4 and r5 registers */
cmp r2, #0 /* more bytes left? */
bne L_Bytewise
ldm sp!, {r8, r10-r11} /* restores registers from stack */
- ldm sp!, {r4-r7, pc} /* restore & return from subroutine */
+ ldm sp!, {r4-r7, lr} /* restore & return from subroutine */
+ bx lr
/* 'len' is long enough to justify aligning the destination pointer */
/* */
mrs r11, cpsr /* copy cpsr into r11 */
subs r1, r1, r11, lsr #28
ldmeq sp!, {r8, r10-r11} /* restores registers from stack */
- ldmeq sp!, {r4-r7, pc} /* restore & return from subroutine */
+ ldmeq sp!, {r4-r7, lr} /* restore & return from subroutine */
+ bxeq lr
/* By the time we reach here, we are 16-byte aligned and r2-r5 contains */
/* rotated pattern. Now lets make sure we are 32-byte aligned. */
/* return if 'len' is zero */
adds r1, r1, #64 /* readjust length; previously subtracted extra 64*/
ldmeq sp!, {r8, r10-r11} /* restores registers from stack */
- ldmeq sp!, {r4-r7, pc} /* restore & return from subroutine */
+ ldmeq sp!, {r4-r7, lr} /* restore & return from subroutine */
+ bxeq lr
L_AlignedLessThan64:
/* do we have 16 or more bytes left */
subsge r1, r1, #16
bgt L_AlignedLessThan64
ldmeq sp!, {r8, r10-r11} /* restores registers from stack */
- ldmeq sp!, {r4-r7, pc} /* restore & return from subroutine */
+ ldmeq sp!, {r4-r7, lr} /* restore & return from subroutine */
+ bxeq lr
L_AlignedLessThan16:
/* store last up-to 15 bytes */
strbvs r2, [r12], #1 /* v is set, store 1 byte */
ldm sp!, {r8, r10-r11} /* restores registers from stack */
- ldm sp!, {r4-r7, pc} /* restore & return from subroutine */
+ ldm sp!, {r4-r7, lr} /* restore & return from subroutine */
+ bx lr
/*----------------------------------------------------------------------------*/
/* void memset_pattern8(void *ptr, const void *pattern8, size_t len); */
/* move 'len' into r1, get 8-byte pattern in r2-r3 */
mov r6, r2 /* temporarily move 'len' into r6 */
- ldr r2, [r1], #4 /* load 8-byte pattern into r2-r3 */
- ldr r3, [r1], #4
+ bl L_GetPatternWord /* get unaligned pattern word in r5 */
+ mov r2, r5 /* move pattern word into r2 */
+ bl L_GetPatternWord
+ mov r3, r5
+ mov r0, r12 /* r0 was clobbered - restore it */
mov r1, r6 /* move 'len' from r6 to r1 */
mov r4, r2 /* copy 8-byte pattern into r4-r5 registers */
/* move 'len' into r1, get 16-byte pattern in r2-r5 */
mov r6, r2 /* temporarily move 'len' into r6 */
- ldr r2, [r1], #4 /* load 16-byte pattern into r2-r5 */
- ldr r3, [r1], #4
- ldr r4, [r1], #4
- ldr r5, [r1], #4
+ bl L_GetPatternWord /* get unaligned pattern word in r5 */
+ mov r2, r5 /* move pattern word into r2 */
+ bl L_GetPatternWord
+ mov r3, r5
+ bl L_GetPatternWord
+ mov r4, r5
+ bl L_GetPatternWord
+ mov r0, r12 /* r0 was clobbered - restore it */
mov r1, r6 /* move 'len' from r6 to r1 */
b L_NotShort /* yes */
-#endif /* _ARM_ARCH_6 */
+/*----------------------------------------------------------------------------*/
+/* Get an unaligned little-endian word at r1, returning it in r5. */
+/* Increments r1 by 4, clobbers r0. */
+/* This is tailored to fit the register usage by the call sites. */
+/*----------------------------------------------------------------------------*/
+L_GetPatternWord:
+ ldrb r5, [r1], #1 /* get the 1st byte at r1 */
+ ldrb r0, [r1], #1 /* get the 2nd byte at r1 */
+ orr r5, r5, r0, lsl #8 /* move into bits 15:8 */
+ ldrb r0, [r1], #1 /* get the 3rd byte */
+ orr r5, r5, r0, lsl #16 /* bits 23:16 */
+ ldrb r0, [r1], #1 /* get the 4th byte */
+ orr r5, r5, r0, lsl #24 /* bits 31:24 */
+ bx lr
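In C terms, L_GetPatternWord is a little-endian unaligned load with a post-increment of the source pointer; a sketch under that assumption (the function name is invented):

    #include <stdint.h>

    /* Assemble a little-endian 32-bit word from four bytes at an
     * arbitrarily aligned pointer, advancing the pointer by 4 --
     * the C counterpart of the ldrb/orr sequence above. */
    static uint32_t get_unaligned_word(const unsigned char **pp)
    {
        const unsigned char *p = *pp;
        uint32_t w = (uint32_t)p[0]
                   | ((uint32_t)p[1] << 8)
                   | ((uint32_t)p[2] << 16)
                   | ((uint32_t)p[3] << 24);
        *pp = p + 4;
        return w;
    }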
#include <uuid/uuid.h>
#include <string.h>
#include <libkern/OSByteOrder.h>
+#include <TargetConditionals.h>
#include "dirhelper.h"
#include "dirhelper_priv.h"
char *
__user_local_dirname(uid_t uid, dirhelper_which_t which, char *path, size_t pathlen)
{
+#if TARGET_OS_EMBEDDED
+ char *tmpdir;
+#else
uuid_t uuid;
char str[ENCODEDSIZE + 1];
+#endif
int res;
if(which < 0 || which > DIRHELPER_USER_LOCAL_LAST) {
return NULL;
}
+#if TARGET_OS_EMBEDDED
+ tmpdir = getenv("TMPDIR");
+ if(!tmpdir) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ res = snprintf(path, pathlen, "%s/%s", tmpdir, subdirs[which]);
+#else
res = mbr_uid_to_uuid(uid, uuid);
if(res != 0) {
errno = res;
res = snprintf(path, pathlen,
"%s%.*s/%s/%s",
VAR_FOLDERS_PATH, BUCKETLEN, str, str, subdirs[which]);
+#endif
if(res >= pathlen) {
errno = EINVAL;
return NULL; /* buffer too small */
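A caller sketch for the two branches (the DIRHELPER_USER_LOCAL_TEMP selector name is an assumption here; only DIRHELPER_USER_LOCAL_LAST appears above):

    #include <limits.h>
    #include <unistd.h>

    /* Hypothetical caller: with TMPDIR set, the embedded branch yields
     * "$TMPDIR/<subdir>"; the desktop branch yields a /var/folders path.
     * Returns NULL with errno = EINVAL if TMPDIR is unset or the buffer
     * is too small. */
    char path[PATH_MAX];
    if (__user_local_dirname(getuid(), DIRHELPER_USER_LOCAL_TEMP,
                             path, sizeof(path)) == NULL) {
        /* handle the error */
    }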
#include <sys/types.h>
#include <servers/bootstrap.h>
#include <pthread.h>
+#include "asl_core.h"
#include <asl_ipc.h>
#define streq(A, B) (strcmp(A, B) == 0)
pthread_mutex_unlock(&_asl_global.lock);
}
-aslclient
-asl_open(const char *ident, const char *facility, uint32_t opts)
+static void
+_asl_get_global_server_port()
{
- char *name, *x;
- asl_client_t *asl;
kern_return_t kstatus;
- asl = (asl_client_t *)calloc(1, sizeof(asl_client_t));
- if (asl == NULL)
- {
- errno = ENOMEM;
- return NULL;
- }
-
- asl->options = opts;
-
- asl->sock = -1;
-
pthread_mutex_lock(&(_asl_global.port_lock));
if (_asl_global.server_port == MACH_PORT_NULL)
}
pthread_mutex_unlock(&(_asl_global.port_lock));
+}
+
+static void
+_asl_release_global_server_port()
+{
+ pthread_mutex_lock(&(_asl_global.port_lock));
+
+ if (_asl_global.port_count > 0) _asl_global.port_count--;
+ if (_asl_global.port_count == 0)
+ {
+ mach_port_deallocate(mach_task_self(), _asl_global.server_port);
+ _asl_global.server_port = MACH_PORT_NULL;
+ }
+
+ pthread_mutex_unlock(&(_asl_global.port_lock));
+}
+
+aslclient
+asl_open(const char *ident, const char *facility, uint32_t opts)
+{
+ char *name, *x;
+ asl_client_t *asl;
+
+ asl = (asl_client_t *)calloc(1, sizeof(asl_client_t));
+ if (asl == NULL)
+ {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ asl->options = opts;
+
+ asl->sock = -1;
+
+ _asl_get_global_server_port();
asl->pid = getpid();
asl->uid = getuid();
* returns: a set of messages that can be iterated over using aslresp_next(),
* and the values can be retrieved using aslresp_get.
*/
-aslresponse
-asl_search(aslclient ac, aslmsg a)
+
+/*
+ * This routine searches the ASL datastore on disk (/var/log/asl).
+ * It is called by asl_search if syslogd is not running or if syslogd
+ * indicates that an in-memory store is not being used.
+ */
+static aslresponse
+_asl_search_store(aslclient ac, aslmsg a)
{
asl_search_result_t query, *out;
asl_msg_t *q, *qlist[1];
out = NULL;
last_id = 0;
- qlist[0] = a;
+ qlist[0] = (asl_msg_t *)a;
memset(&query, 0, sizeof(asl_search_result_t));
query.count = 1;
query.msg = qlist;
return out;
}
+static uint32_t
+_asl_search_concat_results(asl_search_result_t *batch, asl_search_result_t **out)
+{
+ uint32_t i, j;
+
+ if (out == NULL) return ASL_STATUS_FAILED;
+
+ /* nothing to do if batch is NULL or contains no messages */
+ if (batch == NULL) return 0;
+ if (batch->count == 0)
+ {
+ aslresponse_free(batch);
+ return 0;
+ }
+
+ if (*out == NULL) *out = (asl_search_result_t *)calloc(1, sizeof(asl_search_result_t));
+ if (*out == NULL)
+ {
+ aslresponse_free(batch);
+ return ASL_STATUS_FAILED;
+ }
+
+ if ((*out)->count == 0)
+ {
+ (*out)->msg = (asl_msg_t **)calloc(batch->count, sizeof(asl_msg_t *));
+ }
+ else
+ {
+ (*out)->msg = (asl_msg_t **)reallocf((*out)->msg, ((*out)->count + batch->count) * sizeof(asl_msg_t *));
+ }
+
+ if ((*out)->msg == NULL)
+ {
+ aslresponse_free(batch);
+ free(*out);
+ *out = NULL;
+ return ASL_STATUS_FAILED;
+ }
+
+ for (i = 0, j = (*out)->count; i < batch->count; i++, j++) (*out)->msg[j] = batch->msg[i];
+
+ (*out)->count += batch->count;
+ free(batch->msg);
+ free(batch);
+ return ASL_STATUS_OK;
+}
+
+static aslresponse
+_asl_search_memory(aslclient ac, aslmsg a)
+{
+ asl_search_result_t *batch, *out;
+ char *qstr, *str, *res;
+ uint32_t i, len, reslen, status;
+ uint64_t cmax, qmin;
+ kern_return_t kstatus;
+ security_token_t sec;
+ caddr_t vmstr;
+
+ if (a == NULL) return NULL;
+
+ _asl_get_global_server_port();
+ if (_asl_global.server_port == MACH_PORT_NULL) return NULL;
+
+ len = 0;
+ qstr = asl_msg_to_string((asl_msg_t *)a, &len);
+
+ str = NULL;
+ if (qstr == NULL)
+ {
+ asprintf(&str, "0\n");
+ len = 3;
+ }
+ else
+ {
+ asprintf(&str, "1\n%s\n", qstr);
+ len += 4;
+ free(qstr);
+ }
+
+ if (str == NULL)
+ {
+ _asl_release_global_server_port();
+ return NULL;
+ }
+
+ /*
+ * Fetch a batch of results each time through the loop.
+ * Fetching small batches reduces the load on syslogd.
+ */
+ out = NULL;
+ qmin = 0;
+ cmax = 0;
+
+ forever
+ {
+ res = NULL;
+ reslen = 0;
+ sec.val[0] = -1;
+ sec.val[1] = -1;
+ status = ASL_STATUS_OK;
+
+ kstatus = vm_allocate(mach_task_self(), (vm_address_t *)&vmstr, len, TRUE);
+ if (kstatus != KERN_SUCCESS)
+ {
+ free(str);
+ _asl_release_global_server_port();
+ return NULL;
+ }
+
+ memmove(vmstr, str, len);
+
+ status = 0;
+ kstatus = _asl_server_query(_asl_global.server_port, vmstr, len, qmin, FETCH_BATCH, 0, (caddr_t *)&res, &reslen, &cmax, (int *)&status, &sec);
+ if (kstatus != KERN_SUCCESS) break;
+ if (res == NULL) break;
+
+ batch = asl_list_from_string(res);
+ vm_deallocate(mach_task_self(), (vm_address_t)res, reslen);
+
+ i = (batch == NULL) ? 0 : batch->count; /* save the count; the concat below consumes batch */
+
+ status = _asl_search_concat_results(batch, &out);
+ if (status != ASL_STATUS_OK) break;
+ if (i < FETCH_BATCH) break;
+
+ if (cmax > qmin) qmin = cmax + 1;
+ }
+
+ free(str);
+
+ _asl_release_global_server_port();
+ return out;
+}
+
+int
+asl_store_location(void)
+{
+ kern_return_t kstatus;
+ char *res;
+ uint32_t reslen, status;
+ uint64_t cmax;
+ security_token_t sec;
+
+ _asl_get_global_server_port();
+ if (_asl_global.server_port == MACH_PORT_NULL) return ASL_STORE_LOCATION_FILE;
+
+ res = NULL;
+ reslen = 0;
+ cmax = 0;
+ sec.val[0] = -1;
+ sec.val[1] = -1;
+ status = ASL_STATUS_OK;
+
+ kstatus = _asl_server_query(_asl_global.server_port, NULL, 0, 0, -1, 0, (caddr_t *)&res, &reslen, &cmax, (int *)&status, &sec);
+ _asl_release_global_server_port();
+
+ /* res should never be returned, but just to be certain we don't leak VM ... */
+ if (res != NULL) vm_deallocate(mach_task_self(), (vm_address_t)res, reslen);
+
+ if (kstatus != KERN_SUCCESS) return ASL_STORE_LOCATION_FILE;
+
+ if (status == ASL_STATUS_OK) return ASL_STORE_LOCATION_MEMORY;
+ return ASL_STORE_LOCATION_FILE;
+}
+
+aslresponse
+asl_search(aslclient ac, aslmsg a)
+{
+ int where;
+ asl_search_result_t *out;
+
+ /* prevents fetching and destroying the send right twice if nobody has already looked up the port */
+ _asl_get_global_server_port();
+
+ where = asl_store_location();
+ if (where == ASL_STORE_LOCATION_FILE) out = _asl_search_store(ac, a);
+ else out = _asl_search_memory(ac, a);
+
+ _asl_release_global_server_port();
+ return out;
+}
+
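Client code is unaffected by the split: asl_search keeps its signature and picks the store internally. A typical search, for context (a sketch; the sender name is illustrative):

    #include <asl.h>
    #include <stdio.h>

    static void dump_matching_messages(void)
    {
        aslmsg q = asl_new(ASL_TYPE_QUERY);
        asl_set_query(q, ASL_KEY_SENDER, "mytool", ASL_QUERY_OP_EQUAL);

        /* routes to the file store or the in-memory store internally */
        aslresponse r = asl_search(NULL, q);
        if (r != NULL)
        {
            aslmsg m;
            while ((m = aslresponse_next(r)) != NULL)
            {
                const char *text = asl_get(m, ASL_KEY_MSG);
                if (text != NULL) printf("%s\n", text);
            }
            aslresponse_free(r);
        }
        asl_free(q);
    }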
/*
* aslresponse_next: Iterate over responses returned from asl_search()
* a: a response returned from asl_search();
#define ASL_OPT_IGNORE "ignore"
#define ASL_OPT_STORE "store"
+#define ASL_STORE_LOCATION_FILE 0
+#define ASL_STORE_LOCATION_MEMORY 1
+
typedef struct __aslclient
{
uint32_t options;
int asl_add_output(aslclient asl, int fd, const char *msg_fmt, const char *time_fmt, uint32_t text_encoding);
int asl_remove_output(aslclient asl, int fd);
char *asl_format_message(aslmsg msg, const char *msg_fmt, const char *time_fmt, uint32_t text_encoding, uint32_t *outlen);
+int asl_store_location(void);
__END_DECLS
-I/System/Library/Frameworks/System.framework/PrivateHeaders/ -funit-at-a-time \
-dynamiclib -Wall -arch x86_64 -arch i386 -arch ppc */
+#include <TargetConditionals.h>
+
#include "scalable_malloc.h"
#include "malloc_printf.h"
#include "_simple.h"
boolean_t did_madvise_reusable;
} large_entry_t;
+#if !TARGET_OS_EMBEDDED
#define LARGE_CACHE 1
+#else
+#define LARGE_CACHE 0
+#endif
#if !LARGE_CACHE
#warning LARGE_CACHE turned off
#endif
int num_tiny_magazines_mask_shift;
magazine_t *tiny_magazines; // array of per-processor magazines
+#if TARGET_OS_EMBEDDED
+ uintptr_t last_tiny_advise;
+#endif
+
/* Regions for small objects */
pthread_lock_t small_regions_lock CACHE_ALIGN;
size_t num_small_regions;
int num_small_magazines_mask_shift;
magazine_t *small_magazines; // array of per-processor magazines
+#if TARGET_OS_EMBEDDED
+ uintptr_t last_small_advise;
+#endif
+
/* large objects: all the rest */
pthread_lock_t large_szone_lock CACHE_ALIGN; // One customer at a time for large
unsigned num_large_objects_in_use;
static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags,
int vm_page_label);
static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
+#if TARGET_OS_EMBEDDED
+static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last);
+#else
static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi);
+#endif
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);
static INLINE mag_index_t mag_get_thread_index(szone_t *szone) ALWAYSINLINE;
}
static int
+#if TARGET_OS_EMBEDDED
+madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last)
+#else
madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi)
+#endif
{
if (pgHi > pgLo) {
size_t len = pgHi - pgLo;
if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE)
memset((void *)pgLo, 0xed, len); // Scribble on MADV_FREEd memory
#endif
+
+#if TARGET_OS_EMBEDDED
+ if (last) {
+ if (*last == pgLo)
+ return 0;
+
+ *last = pgLo;
+ }
+#endif
+
MAGMALLOC_MADVFREEREGION((void *)szone, (void *)r, (void *)pgLo, len); // DTrace USDT Probe
+#if TARGET_OS_EMBEDDED
+ if (-1 == madvise((void *)pgLo, len, MADV_FREE)) {
+#else
if (-1 == madvise((void *)pgLo, len, MADV_FREE_REUSABLE)) {
+#endif
/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
#if DEBUG_MALLOC
szone_error(szone, 1, "madvise_free_range madvise(..., MADV_FREE_REUSABLE) failed", (void *)pgLo, NULL);
uintptr_t pgHi = trunc_page(start + TINY_REGION_SIZE - sizeof(msize_t));
if (pgLo < pgHi) {
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, r, pgLo, pgHi, NULL);
+#else
madvise_free_range(szone, r, pgLo, pgHi);
+#endif
did_advise = TRUE;
}
break;
uintptr_t pgHi = trunc_page(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
if (pgLo < pgHi) {
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, r, pgLo, pgHi, NULL);
+#else
madvise_free_range(szone, r, pgLo, pgHi);
+#endif
did_advise = TRUE;
}
}
MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (int)BYTES_USED_FOR_TINY_REGION(sparse_region)); // DTrace USDT Probe
+#if !TARGET_OS_EMBEDDED
if (-1 == madvise((void *)sparse_region, TINY_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) {
/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
#if DEBUG_MALLOC
#endif
return 0;
}
+#endif
return 1;
}
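On embedded targets madvise_free_range also threads a cursor (last_tiny_advise / last_small_advise) through the call so that a free that re-covers the same starting page does not re-issue the syscall. The dedup in isolation (a sketch; names invented):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Skip the advise when the range starts where the previous one did;
     * the embedded path uses MADV_FREE rather than MADV_FREE_REUSABLE. */
    static int advise_free_once(void *lo, size_t len, uintptr_t *last)
    {
        if (last != NULL) {
            if (*last == (uintptr_t)lo)
                return 0;               /* same start as last time: skip */
            *last = (uintptr_t)lo;
        }
        return madvise(lo, len, MADV_FREE);
    }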
size_t bytes_used = node->bytes_used - original_size;
node->bytes_used = bytes_used;
+#if !TARGET_OS_EMBEDDED // Always madvise for embedded platforms
/* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
if (szone->num_tiny_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
/* NOTHING */
tiny_free_do_recirc_to_depot(szone, tiny_mag_ptr, mag_index);
} else {
+#endif
// Freed to Depot. N.B. Lock on tiny_magazines[DEPOT_MAGAZINE_INDEX] is already held
uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t);
uintptr_t round_safe = round_page(safe_ptr);
uintptr_t rnd_safe_follow =
round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent), &szone->last_tiny_advise);
+#else
madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent));
+#endif
} else if (did_prepend) { // Coalesced preceding with original_ptr
uintptr_t trunc_safe_prev = trunc_page((uintptr_t)original_ptr - sizeof(msize_t));
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent, &szone->last_tiny_advise);
+#else
madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent);
+#endif
} else if (did_append) { // Coalesced original_ptr with following
uintptr_t rnd_safe_follow =
round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent), &szone->last_tiny_advise);
+#else
madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent));
+#endif
} else { // Isolated free cannot exceed 496 bytes, thus round_safe == trunc_extent, and so we never get here.
/* madvise_free_range(szone, region, round_safe, trunc_extent); */
}
}
+#if !TARGET_OS_EMBEDDED
if (0 < bytes_used) {
/* Depot'd region is still live. Leave it in place on the Depot's recirculation list
so as to avoid thrashing between the Depot's free list and a magazines's free list
tiny_free_try_depot_unmap_no_lock(szone, depot_ptr, node); // FIXME: depot_ptr is simply tiny_mag_ptr?
}
}
+#endif
}
// Allocates from the last region or a freshly allocated region
uintptr_t pgHi = trunc_page(start + SMALL_REGION_SIZE - sizeof(msize_t));
if (pgLo < pgHi) {
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, r, pgLo, pgHi, NULL);
+#else
madvise_free_range(szone, r, pgLo, pgHi);
+#endif
did_advise = TRUE;
}
break;
uintptr_t pgHi = trunc_page(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
if (pgLo < pgHi) {
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, r, pgLo, pgHi, NULL);
+#else
madvise_free_range(szone, r, pgLo, pgHi);
+#endif
did_advise = TRUE;
}
}
MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (int)BYTES_USED_FOR_SMALL_REGION(sparse_region)); // DTrace USDT Probe
+#if !TARGET_OS_EMBEDDED
if (-1 == madvise((void *)sparse_region, SMALL_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) {
/* -1 return: VM map entry change makes this unfit for reuse. Something evil lurks. */
#if DEBUG_MALLOC
#endif
return 0;
}
+#endif
return 1;
}
size_t bytes_used = node->bytes_used - original_size;
node->bytes_used = bytes_used;
+#if !TARGET_OS_EMBEDDED // Always madvise for embedded platforms
/* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
if (szone->num_small_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
/* NOTHING */
small_free_do_recirc_to_depot(szone, small_mag_ptr, mag_index);
} else {
+#endif
// Freed to Depot. N.B. Lock on small_magazines[DEPOT_MAGAZINE_INDEX] is already held
uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t);
uintptr_t round_safe = round_page(safe_ptr);
uintptr_t rnd_safe_follow =
round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent), &szone->last_small_advise);
+#else
madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), MIN(rnd_safe_follow, trunc_extent));
+#endif
} else if (did_prepend) { // Coalesced preceding with original_ptr
uintptr_t trunc_safe_prev = trunc_page((uintptr_t)original_ptr - sizeof(msize_t));
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent, &szone->last_small_advise);
+#else
madvise_free_range(szone, region, MAX(round_safe, trunc_safe_prev), trunc_extent);
+#endif
} else if (did_append) { // Coalesced original_ptr with following
uintptr_t rnd_safe_follow =
round_page((uintptr_t)original_ptr + original_size + sizeof(free_list_t) + sizeof(msize_t));
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent), &szone->last_small_advise);
+#else
madvise_free_range(szone, region, round_safe, MIN(rnd_safe_follow, trunc_extent));
+#endif
} else // Isolated free
+#if TARGET_OS_EMBEDDED
+ madvise_free_range(szone, region, round_safe, trunc_extent, &szone->last_small_advise);
+#else
madvise_free_range(szone, region, round_safe, trunc_extent);
+#endif
}
+#if !TARGET_OS_EMBEDDED
if (0 < bytes_used) {
/* Depot'd region is still live. Leave it in place on the Depot's recirculation list
so as to avoid thrashing between the Depot's free list and a magazines's free list
small_free_try_depot_unmap_no_lock(szone, depot_ptr, node);
}
}
+#endif
}
// Allocates from the last region or a freshly allocated region
// Reduce i by 1 to obtain a mask covering [0 .. (num_tiny_magazines - 1)]
szone->num_tiny_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid)
+#if TARGET_OS_EMBEDDED
+ szone->last_tiny_advise = 0;
+#endif
// Init the tiny_magazine locks
LOCK_INIT(szone->tiny_regions_lock);
// Reduce i by 1 to obtain a mask covering [0 .. (num_small_magazines - 1)]
szone->num_small_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid)
+#if TARGET_OS_EMBEDDED
+ szone->last_small_advise = 0;
+#endif
// Init the small_magazine locks
LOCK_INIT(szone->small_regions_lock);
typedef enum {
OSThermalNotificationLevelAny = -1,
OSThermalNotificationLevelNormal = 0,
+ OSThermalNotificationLevel70PercentTorch = 1,
OSThermalNotificationLevel70PercentBacklight = 3,
+ OSThermalNotificationLevel50PercentTorch = 3,
OSThermalNotificationLevel50PercentBacklight = 5,
+ OSThermalNotificationLevelDisableTorch = 5,
+ OSThermalNotificationLevel25PercentBacklight = 7,
OSThermalNotificationLevelAppTerminate = 12,
OSThermalNotificationLevelDeviceRestart = 16
} OSThermalNotificationLevel;
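Note that several of the new constants deliberately alias existing values (50PercentTorch shares 3 with 70PercentBacklight, DisableTorch shares 5 with 50PercentBacklight), so a single threshold crossing drives both the torch and backlight mitigations. A consumer sketch, assuming the OSThermalNotificationCurrentLevel() accessor declared alongside this enum:

    #include <libkern/OSThermalNotification.h>

    static void apply_thermal_policy(void)
    {
        OSThermalNotificationLevel lvl = OSThermalNotificationCurrentLevel();

        if (lvl >= OSThermalNotificationLevelDisableTorch) {
            /* torch off; backlight is already capped at 50% here */
        } else if (lvl >= OSThermalNotificationLevel50PercentTorch) {
            /* cap torch at 50%; backlight at 70% */
        } else if (lvl >= OSThermalNotificationLevel70PercentTorch) {
            /* cap torch at 70% */
        }
    }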
#endif
shared.cmp = cmp;
shared.es = es;
- shared.queue = dispatch_get_concurrent_queue(0);
+ shared.queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
args->a = a;
#endif
shared.cmp = cmp;
shared.es = es;
- shared.queue = dispatch_get_concurrent_queue(0);
+ shared.queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
args->a = a;
#endif
shared.cmp = cmp;
shared.es = es;
- shared.queue = dispatch_get_concurrent_queue(0);
+ shared.queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
shared.cond = (pthread_cond_t)PTHREAD_COND_INITIALIZER;
shared.mutex = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
args->a = a;
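dispatch_get_concurrent_queue() was an earlier libdispatch name; the shipping interface is dispatch_get_global_queue(priority, flags). The replacement call in context (a minimal sketch):

    #include <dispatch/dispatch.h>

    /* Fan iterations out on the default-priority global concurrent
     * queue, the same queue the psort routines now obtain. */
    static void parallel_example(void)
    {
        dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

        dispatch_apply(4, q, ^(size_t i) {
            (void)i; /* each iteration may run concurrently */
        });
    }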