+ protect((void *)addr, size, PROT_NONE, debug_flags);
+ }
+ return (void *)addr;
+}
+
+static void *
+allocate_pages_securely(szone_t *szone, size_t size, unsigned char align, int vm_page_label)
+{
+ // align specifies a desired alignment (as a power-of-two exponent) or 0 if no alignment requested
+ void *vm_addr;
+ uintptr_t addr, aligned_address;
+ size_t delta, allocation_size = MAX(round_page(size), vm_page_size);
+ int alloc_flags = VM_MAKE_TAG(vm_page_label);
+
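+ // With ASLR disabled (a debug mode), entropic placement serves no purpose; fall back to the
+ // ordinary allocator.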
+ if (szone->debug_flags & DISABLE_ASLR)
+ return allocate_pages(szone, size, align, 0, vm_page_label);
+
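+ // Over-allocate by 2^align so that an aligned sub-range of the requested size is guaranteed
+ // to exist somewhere in the mapping; the excess is unmapped further below.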
+ if (align)
+ allocation_size += (size_t)1 << align;
+
+ if (allocation_size < size) // size_t arithmetic wrapped!
+ return NULL;
+
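+ // Without MAP_FIXED the address passed to mmap is only a hint; the kernel searches for free
+ // space at or above it, so placement starts from the randomized entropic_address.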
+retry:
+ vm_addr = mmap((void *)entropic_address /* kernel finds next available range at or above this address */,
+ allocation_size /* size */,
+ PROT_READ | PROT_WRITE /* prot */,
+ MAP_ANON | MAP_PRIVATE /* flags */,
+ alloc_flags /* fd being used to pass "vm_page_label" */,
+ 0 /* offset */);
+ if (MAP_FAILED == vm_addr) {
+ szone_error(szone, 0, "can't allocate region securely", NULL, "*** mmap(size=%lu) failed (error code=%d)\n",
+ size, errno);
+ return NULL;
+ }
+ addr = (uintptr_t)vm_addr;
+
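+ // entropic_address..entropic_limit delimit a sliding window for these mappings. If the window
+ // is exhausted, slide its base down by ENTROPIC_KABILLION and retry; the compare-and-swap
+ // ensures that racing threads lower the base only once for a given value.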
+ // Don't allow allocation to rise above entropic_limit (for tidiness).
+ if (addr + allocation_size > entropic_limit) { // Exhausted current range?
+ uintptr_t t = entropic_address;
+ uintptr_t u = t - ENTROPIC_KABILLION;
+
+ if (u < t) { // provided we don't wrap, unmap and retry in the expanded entropic range
+ munmap((void *)addr, allocation_size);
+ (void)__sync_bool_compare_and_swap(&entropic_address, t, u); // Just one reduction please
+ goto retry;
+ }
+ // fall through to use what we got
+ }
+
+ if (addr < entropic_address) { // mmap wrapped around to find this allocation; expand the entropic range downward
+ uintptr_t t = entropic_address;
+ uintptr_t u = t - ENTROPIC_KABILLION;
+ if (u < t)
+ (void)__sync_bool_compare_and_swap(&entropic_address, t, u); // Just one reduction please
+ // fall through to use what we got
+ }
+
+ // unmap any excess address range used for alignment padding
+ if (align) {
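+ // Round addr up to the next 2^align boundary, then return any leading padding to the
+ // kernel and shrink allocation_size to match.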
+ aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~(((uintptr_t)1 << align) - 1);
+ if (aligned_address != addr) {
+ delta = aligned_address - addr;
+ if (munmap((void *)addr, delta) == -1)
+ malloc_printf("*** munmap unaligned header failed with %d\n", errno);
+ addr = aligned_address;
+ allocation_size -= delta;
+ }
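+ // Release the tail beyond the requested size. munmap requires page-aligned boundaries,
+ // so size is effectively assumed to be a page multiple here.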
+ if (allocation_size > size) {
+ if (munmap((void *)(addr + size), allocation_size - size) == -1)
+ malloc_printf("*** munmap unaligned footer failed with %d\n", errno);
+ }