+ pl = ubc_upl_pageinfo(upl);
+
+ pg_index = upl_offset / PAGE_SIZE;
+ pg_offset = upl_offset & PAGE_MASK;
+ csize = min(PAGE_SIZE - pg_offset, xsize);
+
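+ /*
+ * Copy one physical page per iteration: csize is the byte count for
+ * the current page (the first page is limited by pg_offset, the last
+ * by the remaining transfer size in xsize).
+ */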
+ while (xsize && retval == 0) {
+ addr64_t paddr;
+
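+ /*
+ * Convert the UPL's physical page number into a 64-bit physical
+ * address (the << 12 shift assumes 4KB physical pages) and add
+ * the offset into the page.
+ */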
+ paddr = ((addr64_t)upl_phys_page(pl, pg_index) << 12) + pg_offset;
+
+ retval = uiomove64(paddr, csize, uio);
+
+ pg_index += 1;
+ pg_offset = 0;
+ xsize -= csize;
+ csize = min(PAGE_SIZE, xsize);
+ }
+ *io_resid = xsize;
+
+ uio->uio_segflg = segflg;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
+ (int)uio->uio_offset, xsize, retval, segflg, 0);
+
+ return (retval);
+}
+
+
+int
+cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
+{
+
+ return (cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1));
+}
+
+
+static int
+cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
+{
+ int segflg;
+ int io_size;
+ int xsize;
+ int start_offset;
+ int retval = 0;
+ memory_object_control_t control;
+
+ io_size = *io_resid;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
+ (int)uio->uio_offset, 0, io_size, 0, 0);
+
+ control = ubc_getobject(vp, UBC_FLAGS_NONE);
+
+ if (control == MEMORY_OBJECT_CONTROL_NULL) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
+ (int)uio->uio_offset, io_size, retval, 3, 0);
+
+ return(0);
+ }
+ segflg = uio->uio_segflg;
+
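+ /*
+ * Temporarily switch the uio's segment flag to its physical-copy
+ * equivalent for the duration of the copy; the original value is
+ * restored before we return.
+ */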
+ switch(segflg) {
+
+ case UIO_USERSPACE32:
+ case UIO_USERISPACE32:
+ uio->uio_segflg = UIO_PHYS_USERSPACE32;
+ break;
+
+ case UIO_USERSPACE64:
+ case UIO_USERISPACE64:
+ uio->uio_segflg = UIO_PHYS_USERSPACE64;
+ break;
+
+ case UIO_USERSPACE:
+ case UIO_USERISPACE:
+ uio->uio_segflg = UIO_PHYS_USERSPACE;
+ break;
+
+ case UIO_SYSSPACE:
+ uio->uio_segflg = UIO_PHYS_SYSSPACE;
+ break;
+ }
+
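+ /*
+ * Move up to io_size bytes between the object's resident pages and
+ * the caller's uio. xsize measures how much uio_resid() actually
+ * shrank, so io_size ends up holding the bytes we could not transfer.
+ */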
+ if ( (io_size = *io_resid) ) {
+ start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
+ xsize = uio_resid(uio);
+
+ retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio,
+ start_offset, io_size, mark_dirty, take_reference);
+ xsize -= uio_resid(uio);
+ io_size -= xsize;
+ }
+ uio->uio_segflg = segflg;
+ *io_resid = io_size;
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
+ (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
+
+ return(retval);
+}
+
+
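+/*
+ * Scan the file a page at a time and report whether any resident page
+ * is dirty; returns EINVAL if at least one dirty page is found, 0
+ * otherwise.
+ */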
+int
+is_file_clean(vnode_t vp, off_t filesize)
+{
+ off_t f_offset;
+ int flags;
+ int total_dirty = 0;
+
+ for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
+ if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
+ if (flags & UPL_POP_DIRTY) {
+ total_dirty++;
+ }
+ }
+ }
+ if (total_dirty)
+ return(EINVAL);
+
+ return (0);
+}
+
+
+
+/*
+ * Dirty region tracking/clustering mechanism.
+ *
+ * This code (vfs_drt_*) provides a mechanism for tracking and clustering
+ * dirty regions within a larger space (file). It is primarily intended to
+ * support clustering in large files with many dirty areas.
+ *
+ * The implementation assumes that the dirty regions are pages.
+ *
+ * To represent dirty pages within the file, we store bit vectors in a
+ * variable-size circular hash.
+ */
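+
+/*
+ * Worked example (assuming 4KB pages): a dirty page at file offset
+ * 0x12345000 belongs to the hash entry whose address field is the
+ * 1MB-aligned offset 0x12300000, and is recorded as bit
+ * (0x12345000 - 0x12300000) / 4096 = 0x45 in that entry's bitvector.
+ */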
+
+/*
+ * Bitvector size. This determines the number of pages we group in a
+ * single hashtable entry. Each hashtable entry is aligned to this
+ * size within the file.
+ */
+#define DRT_BITVECTOR_PAGES 256
+
+/*
+ * File offset handling.
+ *
+ * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES;
+ * the correct formula is (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1))
+ */
+#define DRT_ADDRESS_MASK (~((1 << 20) - 1))
+#define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK)
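+
+/*
+ * With 4KB pages, DRT_BITVECTOR_PAGES * PAGE_SIZE = 256 * 4096 = 1 << 20,
+ * so the hard-coded mask above clears the low 20 bits and aligns offsets
+ * to the 1MB span covered by a single hashtable entry.
+ */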
+
+/*
+ * Hashtable address field handling.
+ *
+ * The low-order bits of the hashtable address are used to conserve
+ * space.
+ *
+ * DRT_HASH_COUNT_MASK must be large enough to store the range
+ * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value
+ * to indicate that the bucket is actually unoccupied.
+ */
+#define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
+#define DRT_HASH_SET_ADDRESS(scm, i, a) \
+ do { \
+ (scm)->scm_hashtable[(i)].dhe_control = \
+ ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
+ } while (0)
+#define DRT_HASH_COUNT_MASK 0x1ff
+#define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
+#define DRT_HASH_SET_COUNT(scm, i, c) \
+ do { \
+ (scm)->scm_hashtable[(i)].dhe_control = \
+ ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
+ } while (0)
+#define DRT_HASH_CLEAR(scm, i) \
+ do { \
+ (scm)->scm_hashtable[(i)].dhe_control = 0; \
+ } while (0)
+#define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
+#define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
+#define DRT_HASH_COPY(oscm, oi, scm, i) \
+ do { \
+ (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
+ DRT_BITVECTOR_COPY(oscm, oi, scm, i); \
+ } while (0)
+
+
+/*
+ * Hash table moduli.
+ *
+ * Since the hashtable entry's size is dependent on the size of
+ * the bitvector, and since the hashtable size is constrained to
+ * both being prime and fitting within the desired allocation
+ * size, these values need to be manually determined.
+ *
+ * For DRT_BITVECTOR_PAGES = 256, the entry size is 40 bytes.
+ *
+ * The small hashtable allocation is 1024 bytes, so the modulus is 23.
+ * The large hashtable allocation is 16384 bytes, so the modulus is 401.
+ */
+#define DRT_HASH_SMALL_MODULUS 23
+#define DRT_HASH_LARGE_MODULUS 401
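+
+/*
+ * Sizing check: each entry is an 8-byte dhe_control plus a
+ * (256 / 32) * 4 = 32 byte bitvector, i.e. 40 bytes, so
+ * 23 * 40 = 920 bytes fit in the small allocation and
+ * 401 * 40 = 16040 bytes fit in the large one; the leftover space
+ * holds the vfs_drt_clustermap header fields.
+ */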
+
+/*
+ * Physical memory required before the large hash modulus is permitted.
+ *
+ * On small memory systems, the large hash modulus can lead to physical
+ * memory starvation, so we avoid using it there.
+ */
+#define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */
+
+#define DRT_SMALL_ALLOCATION 1024 /* 104 bytes spare */
+#define DRT_LARGE_ALLOCATION 16384 /* 344 bytes spare */
+
+/* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */
+
+/*
+ * Hashtable bitvector handling.
+ *
+ * Bitvector fields are 32 bits long.
+ */
+
+#define DRT_HASH_SET_BIT(scm, i, bit) \
+ (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
+
+#define DRT_HASH_CLEAR_BIT(scm, i, bit) \
+ (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
+
+#define DRT_HASH_TEST_BIT(scm, i, bit) \
+ ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
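+
+/*
+ * Example: page 37 within an entry's span is tracked by bit 5 of
+ * dhe_bitvector[1] (37 / 32 = 1, 37 % 32 = 5).
+ */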
+
+#define DRT_BITVECTOR_CLEAR(scm, i) \
+ bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
+
+#define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \
+ bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \
+ &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \
+ (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
+
+
+
+/*
+ * Hashtable entry.
+ */
+struct vfs_drt_hashentry {
+ u_int64_t dhe_control;
+ u_int32_t dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
+};
+
+/*
+ * Dirty Region Tracking structure.
+ *
+ * The hashtable is allocated entirely inside the DRT structure.
+ *
+ * The hash is a simple circular prime modulus arrangement; the structure
+ * is resized from small to large if it overflows.
+ */
+
+struct vfs_drt_clustermap {
+ u_int32_t scm_magic; /* sanity/detection */
+#define DRT_SCM_MAGIC 0x12020003
+ u_int32_t scm_modulus; /* current ring size */
+ u_int32_t scm_buckets; /* number of occupied buckets */
+ u_int32_t scm_lastclean; /* last entry we cleaned */
+ u_int32_t scm_iskips; /* number of slot skips */
+
+ struct vfs_drt_hashentry scm_hashtable[0];
+};
+
+
+#define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus)
+#define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus)
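+
+/*
+ * DRT_HASH maps an aligned file offset directly onto the prime-sized
+ * ring by taking it modulo scm_modulus; DRT_HASH_NEXT steps to the
+ * following slot for linear probing, wrapping at the end of the ring.
+ */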
+
+/*
+ * Debugging codes and arguments.
+ */
+#define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */
+#define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */
+#define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */
+#define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */
+#define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length,
+ * dirty */
+ /* 0, setcount */
+ /* 1 (clean, no map) */
+ /* 2 (map alloc fail) */
+ /* 3, resid (partial) */
+#define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87))
+#define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets,
+ * lastclean, iskips */
+
+
+static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
+static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
+static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
+ u_int64_t offset, int *indexp);
+static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
+ u_int64_t offset,
+ int *indexp,
+ int recursed);
+static kern_return_t vfs_drt_do_mark_pages(
+ void **cmapp,
+ u_int64_t offset,
+ u_int length,
+ u_int *setcountp,
+ int dirty);
+static void vfs_drt_trace(
+ struct vfs_drt_clustermap *cmap,
+ int code,
+ int arg1,
+ int arg2,
+ int arg3,
+ int arg4);
+
+
+/*
+ * Allocate and initialise a sparse cluster map.
+ *
+ * Will allocate a new map, or resize or compact an existing one.
+ *
+ * XXX we should probably have at least one intermediate map size,
+ * as the 1:16 ratio seems a bit drastic.
+ */
+static kern_return_t
+vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
+{
+ struct vfs_drt_clustermap *cmap, *ocmap;
+ kern_return_t kret;
+ u_int64_t offset;
+ u_int32_t i;
+ int nsize, active_buckets, index, copycount;
+
+ ocmap = NULL;
+ if (cmapp != NULL)
+ ocmap = *cmapp;
+
+ /*
+ * Decide on the size of the new map.
+ */
+ if (ocmap == NULL) {
+ nsize = DRT_HASH_SMALL_MODULUS;
+ } else {
+ /* count the number of active buckets in the old map */
+ active_buckets = 0;
+ for (i = 0; i < ocmap->scm_modulus; i++) {
+ if (!DRT_HASH_VACANT(ocmap, i) &&
+ (DRT_HASH_GET_COUNT(ocmap, i) != 0))
+ active_buckets++;
+ }
+ /*
+ * If we're currently using the small allocation, check to
+ * see whether we should grow to the large one.
+ */
+ if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
+ /*
+ * If the ring is nearly full and we are allowed to
+ * use the large modulus, upgrade.
+ */
+ if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) &&
+ (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) {
+ nsize = DRT_HASH_LARGE_MODULUS;
+ } else {
+ nsize = DRT_HASH_SMALL_MODULUS;
+ }
+ } else {
+ /* already using the large modulus */
+ nsize = DRT_HASH_LARGE_MODULUS;
+ /*
+ * If the ring is completely full, there's
+ * nothing useful for us to do. Behave as
+ * though we had compacted into the new
+ * array and return.
+ */
+ if (active_buckets >= DRT_HASH_LARGE_MODULUS)
+ return(KERN_SUCCESS);
+ }
+ }
+
+ /*
+ * Allocate and initialise the new map.
+ */
+
+ kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap,
+ (nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
+ if (kret != KERN_SUCCESS)
+ return(kret);
+ cmap->scm_magic = DRT_SCM_MAGIC;
+ cmap->scm_modulus = nsize;
+ cmap->scm_buckets = 0;
+ cmap->scm_lastclean = 0;
+ cmap->scm_iskips = 0;
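+ /*
+ * Every slot starts out vacant: the count field holds the
+ * DRT_HASH_COUNT_MASK sentinel and the bitvector is zeroed.
+ */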
+ for (i = 0; i < cmap->scm_modulus; i++) {
+ DRT_HASH_CLEAR(cmap, i);
+ DRT_HASH_VACATE(cmap, i);
+ DRT_BITVECTOR_CLEAR(cmap, i);
+ }
+
+ /*
+ * If there's an old map, re-hash entries from it into the new map.
+ */
+ copycount = 0;
+ if (ocmap != NULL) {
+ for (i = 0; i < ocmap->scm_modulus; i++) {
+ /* skip empty buckets */
+ if (DRT_HASH_VACANT(ocmap, i) ||
+ (DRT_HASH_GET_COUNT(ocmap, i) == 0))
+ continue;
+ /* get new index */
+ offset = DRT_HASH_GET_ADDRESS(ocmap, i);
+ kret = vfs_drt_get_index(&cmap, offset, &index, 1);
+ if (kret != KERN_SUCCESS) {
+ /* XXX need to bail out gracefully here */
+ panic("vfs_drt: new cluster map mysteriously too small");
+ index = 0;
+ }
+ /* copy */
+ DRT_HASH_COPY(ocmap, i, cmap, index);
+ copycount++;
+ }
+ }
+
+ /* log what we've done */
+ vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
+
+ /*
+ * It's important to ensure that *cmapp always points to
+ * a valid map, so we must overwrite it before freeing
+ * the old map.
+ */
+ *cmapp = cmap;
+ if (ocmap != NULL) {
+ /* emit stats into trace buffer */
+ vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
+ ocmap->scm_modulus,
+ ocmap->scm_buckets,
+ ocmap->scm_lastclean,
+ ocmap->scm_iskips);
+
+ vfs_drt_free_map(ocmap);
+ }
+ return(KERN_SUCCESS);
+}
+
+
+/*
+ * Free a sparse cluster map.
+ */
+static kern_return_t
+vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
+{
+ kmem_free(kernel_map, (vm_offset_t)cmap,
+ (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
+ return(KERN_SUCCESS);
+}
+
+
+/*
+ * Find the hashtable slot currently occupied by an entry for the supplied offset.
+ */
+static kern_return_t
+vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
+{
+ int index;
+ u_int32_t i;
+
+ offset = DRT_ALIGN_ADDRESS(offset);
+ index = DRT_HASH(cmap, offset);
+
+ /* traverse the hashtable */
+ for (i = 0; i < cmap->scm_modulus; i++) {
+
+ /*
+ * If the slot is vacant, we can stop.
+ */
+ if (DRT_HASH_VACANT(cmap, index))
+ break;
+
+ /*
+ * If the address matches our offset, we have success.
+ */
+ if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
+ *indexp = index;
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Move to the next slot, try again.
+ */
+ index = DRT_HASH_NEXT(cmap, index);
+ }
+ /*
+ * It's not there.
+ */
+ return(KERN_FAILURE);
+}
+
+/*
+ * Find the hashtable slot for the supplied offset. If we haven't allocated
+ * one yet, allocate one and populate the address field. Note that it will
+ * not have a nonzero page count and thus will still technically be free, so
+ * in the case where we are called to clean pages, the slot will remain free.
+ */
+static kern_return_t
+vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
+{
+ struct vfs_drt_clustermap *cmap;
+ kern_return_t kret;
+ u_int32_t index;
+ u_int32_t i;
+
+ cmap = *cmapp;
+
+ /* look for an existing entry */
+ kret = vfs_drt_search_index(cmap, offset, indexp);
+ if (kret == KERN_SUCCESS)
+ return(kret);
+
+ /* need to allocate an entry */
+ offset = DRT_ALIGN_ADDRESS(offset);
+ index = DRT_HASH(cmap, offset);
+
+ /* scan from the index forwards looking for a vacant slot */
+ for (i = 0; i < cmap->scm_modulus; i++) {
+ /* slot vacant? */
+ if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap,index) == 0) {
+ cmap->scm_buckets++;
+ if (index < cmap->scm_lastclean)
+ cmap->scm_lastclean = index;
+ DRT_HASH_SET_ADDRESS(cmap, index, offset);
+ DRT_HASH_SET_COUNT(cmap, index, 0);
+ DRT_BITVECTOR_CLEAR(cmap, index);
+ *indexp = index;
+ vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
+ return(KERN_SUCCESS);
+ }
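+ /*
+ * Slot is occupied by another address: add the current probe
+ * distance to the skip statistics and try the next slot.
+ */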
+ cmap->scm_iskips += i;
+ index = DRT_HASH_NEXT(cmap, index);
+ }
+
+ /*
+ * We haven't found a vacant slot, so the map is full. If we're not
+ * already recursed, try reallocating/compacting it.
+ */
+ if (recursed)
+ return(KERN_FAILURE);
+ kret = vfs_drt_alloc_map(cmapp);
+ if (kret == KERN_SUCCESS) {
+ /* now try to insert again */
+ kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
+ }
+ return(kret);
+}
+
+/*
+ * Implementation of set dirty/clean.
+ *
+ * In the 'clean' case, not finding a map is OK.
+ */
+static kern_return_t
+vfs_drt_do_mark_pages(
+ void **private,
+ u_int64_t offset,
+ u_int length,
+ u_int *setcountp,
+ int dirty)
+{
+ struct vfs_drt_clustermap *cmap, **cmapp;
+ kern_return_t kret;
+ int i, index, pgoff, pgcount, setcount, ecount;
+
+ cmapp = (struct vfs_drt_clustermap **)private;
+ cmap = *cmapp;
+
+ vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
+
+ if (setcountp != NULL)
+ *setcountp = 0;
+
+ /* allocate a cluster map if we don't already have one */
+ if (cmap == NULL) {
+ /* no cluster map, nothing to clean */
+ if (!dirty) {
+ vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
+ return(KERN_SUCCESS);
+ }
+ kret = vfs_drt_alloc_map(cmapp);
+ if (kret != KERN_SUCCESS) {
+ vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
+ return(kret);
+ }
+ }
+ setcount = 0;
+
+ /*
+ * Iterate over the length of the region.
+ */
+ while (length > 0) {
+ /*
+ * Get the hashtable index for this offset.
+ *
+ * XXX this will add blank entries if we are clearing a range
+ * that hasn't been dirtied.
+ */
+ kret = vfs_drt_get_index(cmapp, offset, &index, 0);
+ cmap = *cmapp; /* may have changed! */
+ /* this may be a partial-success return */
+ if (kret != KERN_SUCCESS) {
+ if (setcountp != NULL)
+ *setcountp = setcount;
+ vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
+
+ return(kret);
+ }
+
+ /*
+ * Work out how many pages we're modifying in this
+ * hashtable entry.
+ */
+ pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE;
+ pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
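+ /*
+ * e.g. an offset 5 pages into an entry's 256-page span with 1000
+ * pages still to process yields pgoff = 5 and
+ * pgcount = min(1000, 251) = 251 pages handled in this pass.
+ */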
+
+ /*
+ * Iterate over pages, dirty/clearing as we go.
+ */
+ ecount = DRT_HASH_GET_COUNT(cmap, index);
+ for (i = 0; i < pgcount; i++) {
+ if (dirty) {
+ if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
+ DRT_HASH_SET_BIT(cmap, index, pgoff + i);
+ ecount++;
+ setcount++;
+ }
+ } else {
+ if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
+ DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
+ ecount--;
+ setcount++;
+ }
+ }
+ }
+ DRT_HASH_SET_COUNT(cmap, index, ecount);
+
+ offset += pgcount * PAGE_SIZE;
+ length -= pgcount * PAGE_SIZE;