+/*
+ * The posix_memalign() function shall allocate size bytes aligned on a boundary specified by alignment,
+ * and shall return a pointer to the allocated memory in memptr.
+ * The value of alignment shall be a multiple of sizeof( void *), that is also a power of two.
+ * Upon successful completion, the value pointed to by memptr shall be a multiple of alignment.
+ *
+ * Upon successful completion, posix_memalign() shall return zero; otherwise,
+ * an error number shall be returned to indicate the error.
+ *
+ * The posix_memalign() function shall fail if:
+ * EINVAL
+ * The value of the alignment parameter is not a power of two multiple of sizeof( void *).
+ * ENOMEM
+ * There is insufficient memory available with the requested alignment.
+ */
+
+int
+posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+    void *result;
+
+    /* POSIX is silent on NULL == memptr !?! */
+
+    /* Let malloc_zone_memalign vet every request; the alignment is only
+     * re-examined here, after a NULL return, so the constraint is not
+     * tested twice on the success path. */
+    result = malloc_zone_memalign(inline_malloc_default_zone(), alignment, size);
+    if (result != NULL) {
+        *memptr = result; /* written iff the allocation succeeded */
+        return 0;
+    }
+
+    /* A valid alignment is a power of two no smaller than sizeof(void *);
+     * the first comparison also rejects alignment == 0, and the mask test
+     * relies on sizeof(void *) itself being a power of two. */
+    if (alignment < sizeof(void *) || (alignment & (alignment - 1)) != 0) {
+        return EINVAL;
+    }
+    return ENOMEM;
+}
+
+static malloc_zone_t *
+find_registered_purgeable_zone(void *ptr) {
+    /*
+     * Locate the registered zone owning ptr, returning it only when the
+     * allocation is a nonzero multiple of the page size (purgeability
+     * operates on whole pages).  Returns NULL otherwise.
+     * FIXME: for performance reasons, we should probably keep a separate
+     * list of purgeable zones and only search those.
+     */
+    if (ptr == NULL)
+        return NULL;
+
+    size_t allocation_size = 0;
+    malloc_zone_t *owning_zone = find_registered_zone(ptr, &allocation_size);
+
+    /* FIXME: would really like a zone->introspect->flags->purgeable check,
+     * but haven't determined the binary compatibility impact of changing
+     * the introspect struct yet. */
+    if (owning_zone == NULL)
+        return NULL;
+
+    /* Reject allocations smaller than a page or not sized in whole pages. */
+    if (allocation_size < vm_page_size || (allocation_size % vm_page_size) != 0)
+        return NULL;
+
+    return owning_zone;
+}
+
+void
+malloc_make_purgeable(void *ptr) {
+    /* Mark ptr's pages volatile so the kernel may reclaim them under
+     * memory pressure.  Silently does nothing when ptr does not belong
+     * to a suitable registered zone. */
+    malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
+    if (zone == NULL)
+        return;
+
+    int state = VM_PURGABLE_VOLATILE;
+    vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
+}
+
+/* Makes ptr's pages nonvolatile again.  Returns 0 when ptr is usable (or
+ * unknown to any purgeable zone); returns EFAULT when the kernel reports the
+ * contents were discarded while volatile (VM_PURGABLE_EMPTY).  The return
+ * value of vm_purgable_control itself is ignored — only the prior state
+ * written back through `state` is consulted. */
+int
+malloc_make_nonpurgeable(void *ptr) {
+    malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
+    if (zone == NULL)
+        return 0;
+
+    int state = VM_PURGABLE_NONVOLATILE;
+    vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
+
+    return (state == VM_PURGABLE_EMPTY) ? EFAULT : 0;
+}
+
+/********* Batch methods ************/
+
+unsigned
+malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) {
+    /* Allocate up to num_requested blocks of `size` bytes from `zone` in one
+     * call, storing the pointers in `results`.  Returns the count actually
+     * allocated; 0 when the zone provides no batch_malloc entry point. */
+    unsigned (*batch_malloc)(malloc_zone_t *, size_t, void **, unsigned) = zone->batch_malloc;
+    if (batch_malloc == NULL)
+        return 0;
+    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
+        internal_check(); /* periodic heap consistency check */
+    }
+    unsigned batched = batch_malloc(zone, size, results, num_requested);
+    if (malloc_logger) {
+        for (unsigned i = 0; i < batched; i++) {
+            malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)results[i], 0);
+        }
+    }
+    return batched;
+}
+
+void
+malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num) {
+    /* Free `num` pointers from `to_be_freed` back to `zone`, preferring the
+     * zone's batch_free entry point and falling back to per-pointer free. */
+    if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
+        internal_check(); /* periodic heap consistency check */
+    }
+    if (malloc_logger) {
+        for (unsigned i = 0; i < num; i++) {
+            malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)to_be_freed[i], 0, 0, 0);
+        }
+    }
+    void (*batch_free)(malloc_zone_t *, void **, unsigned) = zone->batch_free;
+    if (batch_free != NULL) {
+        batch_free(zone, to_be_freed, num);
+    } else {
+        void (*free_fun)(malloc_zone_t *, void *) = zone->free;
+        while (num--) {
+            free_fun(zone, *to_be_freed++);
+        }
+    }
+}
+