/*
* Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
*
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
*
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ * @APPLE_LICENSE_HEADER_END@
*/
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*-
/* staleness thresholds for the LRU, AGE and META buffer queues */
int lru_is_stale = LRU_IS_STALE;
int age_is_stale = AGE_IS_STALE;
int meta_is_stale = META_IS_STALE;
-static int boot_nbuf = 0;
+
/* LIST_INSERT_HEAD() with assertions */
static __inline__ void
binshash(buf_t bp, struct bufhashhdr *dp)
{
-#if DIAGNOSTIC
buf_t nbp;
-#endif /* DIAGNOSTIC */
BHASHENTCHECK(bp);
-#if DIAGNOSTIC
nbp = dp->lh_first;
for(; nbp != NULL; nbp = nbp->b_hash.le_next) {
if(nbp == bp)
panic("buf already in hashlist");
}
-#endif /* DIAGNOSTIC */
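/* bp is not on the chain; link it in at the head */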
blistenterhead(dp, bp);
}
int metabuf;
long whichq;
- nbuf = 0;
/* Initialize the buffer queues ('freelists') and the hash table */
for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
TAILQ_INIT(dp);
- bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);
+ bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
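/* hashinit() sizes the table to a power of two and returns the chain mask in bufhash */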
- metabuf = max_nbuf_headers/8; /* reserved for meta buf */
+ metabuf = nbuf/8; /* reserved for meta buf */
/* Initialize the buffer headers */
- for (i = 0; i < max_nbuf_headers; i++) {
- nbuf++;
+ for (i = 0; i < nbuf; i++) {
bp = &buf[i];
bufhdrinit(bp);
binshash(bp, &invalhash);
}
- boot_nbuf = nbuf;
-
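/* now initialize the iobuf headers and hang them on the iobuf free list */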
for (; i < nbuf + niobuf; i++) {
bp = &buf[i];
bufhdrinit(bp);
binsheadfree(bp, &iobufqueue, -1);
}
- /*
+ /*
* allocate lock group attribute and group
*/
- buf_mtx_grp_attr = lck_grp_attr_alloc_init();
+ buf_mtx_grp_attr = lck_grp_attr_alloc_init();
+ //lck_grp_attr_setstat(buf_mtx_grp_attr);
buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr);
/*
* allocate the lock attribute
*/
buf_mtx_attr = lck_attr_alloc_init();
+ //lck_attr_setdebug(buf_mtx_attr);
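/* the setstat/setdebug calls above are left commented out so lock statistics and lock debugging can be re-enabled easily */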
/*
* allocate and initialize mutexes for the buffer and iobuffer pools
/* Start I/O for the buffer (keeping credentials). */
SET(bp->b_flags, B_READ | async);
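/* if the caller passed a credential and the buffer holds none, take a reference and keep it as the read credential */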
- if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
+ if (cred != NOCRED && bp->b_rcred == NOCRED) {
kauth_cred_ref(cred);
bp->b_rcred = cred;
}
bp->b_datap = (uintptr_t)zalloc(z);
} else {
bp->b_datap = (uintptr_t)NULL;
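/* not backed by a zone at this size; allocate directly from the kernel map */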
- kmem_alloc_wired(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
+ kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
CLR(bp->b_flags, B_ZALLOC);
}
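/* copy the old contents into the new allocation */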
bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
if ((vm_size_t)bp->b_bufsize < desired_size) {
/* reallocate to a bigger size */
bp->b_datap = (uintptr_t)NULL;
- kmem_alloc_wired(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
+ kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
kmem_free(kernel_map, elem, bp->b_bufsize);
} else {
bp->b_datap = (uintptr_t)zalloc(z);
SET(bp->b_flags, B_ZALLOC);
} else
- kmem_alloc_wired(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
+ kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
}
}
bp->b_bufsize = desired_size;
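/* sanitize the caller's queue preference: anything out of range, or the laundry/locked queues, falls back to BQ_EMPTY */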
if ((*queue > BQUEUES) || (*queue < 0)
|| (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED))
*queue = BQ_EMPTY;
- /* need to grow number of bufs, add another one rather than recycling */
- if (nbuf < max_nbuf_headers) {
- /*
- * Increment count now as lock
- * is dropped for allocation.
- * That avoids over commits
- */
- nbuf++;
- goto add_newbufs;
- }
/*
* (*queue == BQUEUES) means no preference
*queue = BQ_EMPTY;
goto found;
}
- /*
- * We have seen is this is hard to trigger.
- * This is an overcommit of nbufs but needed
- * in some scenarios with diskiamges
- */
-
-add_newbufs:
lck_mtx_unlock(buf_mtxp);
/* Create a new temporary buffer header */
buf_hdr_count++;
goto found;
}
- /* subtract already accounted bufcount */
- nbuf--;
-
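/* nothing could be recycled; note the sleep and wait for a buffer to be freed */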
bufstats.bufs_sleeps++;
/* wait for a free buffer of any kind */
/* slptimeo is expressed in hz ticks; hz is 100, so each tick is 10 ms */
ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
msleep(&needbuffer, buf_mtxp, slpflag|(PRIBIO+1), (char *)"getnewbuf", &ts);
+
return (0);
}
static int
bcleanbuf(buf_t bp)
{
+ ucred_t cred;
+
/* Remove from the queue */
bremfree_locked(bp);
bp->b_validoff = bp->b_validend = 0;
/* nuke any credentials we were holding */
- if (IS_VALID_CRED(bp->b_rcred)) {
- kauth_cred_unref(&bp->b_rcred);
+ cred = bp->b_rcred;
+ if (cred != NOCRED) {
+ bp->b_rcred = NOCRED;
+ kauth_cred_rele(cred);
}
- if (IS_VALID_CRED(bp->b_wcred)) {
- kauth_cred_unref(&bp->b_wcred);
+ cred = bp->b_wcred;
+ if (cred != NOCRED) {
+ bp->b_wcred = NOCRED;
+ kauth_cred_rele(cred);
}
lck_mtx_lock(buf_mtxp);
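/* count the buffer headers that are still busy and have not been invalidated */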
buf_t bp;
int nbusy = 0;
- lck_mtx_lock(buf_mtxp);
- for (bp = &buf[boot_nbuf]; --bp >= buf; )
+ for (bp = &buf[nbuf]; --bp >= buf; )
if (!ISSET(bp->b_flags, B_INVAL) && ISSET(bp->b_lflags, BL_BUSY))
nbusy++;
- lck_mtx_unlock(buf_mtxp);
-
return (nbusy);
}