#define LOCKF_DEBUG(mask, ...) /* mask */
#endif /* !LOCKF_DEBUGGING */
-MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
+/*
+ * If you need accounting for KM_LOCKF, consider using
+ * ZONE_VIEW_DEFINE to define a view.
+ */
+#define KM_LOCKF KHEAP_DEFAULT
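+/*
+ * Hypothetical sketch only (ZV_LOCKF is illustrative and not defined
+ * by this change); allocations would then go through zalloc(ZV_LOCKF)
+ * and zfree(ZV_LOCKF, ...) rather than kheap_alloc():
+ *
+ *	ZONE_VIEW_DEFINE(ZV_LOCKF, "lockf",
+ *	    KHEAP_ID_DEFAULT, sizeof(struct lockf));
+ */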
#define NOLOCKF (struct lockf *)0
#define SELF 0x1
static void lf_adjust_assertion(struct lockf *block);
#endif /* IMPORTANCE_INHERITANCE */
-static lck_mtx_t lf_dead_lock;
-static lck_grp_t *lf_dead_lock_grp;
-
-void
-lf_init(void)
-{
- lf_dead_lock_grp = lck_grp_alloc_init("lf_dead_lock", LCK_GRP_ATTR_NULL);
- lck_mtx_init(&lf_dead_lock, lf_dead_lock_grp, LCK_ATTR_NULL);
-}
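+/*
+ * LCK_GRP_DECLARE/LCK_MTX_DECLARE register the group and initialize
+ * the mutex statically at startup, replacing the boot-time lf_init()
+ * setup removed above; no explicit initialization call remains.
+ */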
+static LCK_GRP_DECLARE(lf_dead_lock_grp, "lf_dead_lock");
+static LCK_MTX_DECLARE(lf_dead_lock, &lf_dead_lock_grp);
/*
* lf_advlock
/*
* Create the lockf structure
*/
- MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
+ lock = kheap_alloc(KM_LOCKF, sizeof(struct lockf), Z_WAITOK);
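+	/*
+	 * Z_WAITOK lets the allocation block, but kheap_alloc() may still
+	 * return NULL under memory pressure, so the ENOLCK path below is
+	 * preserved.
+	 */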
if (lock == NULL) {
return ENOLCK;
}
case F_UNLCK:
error = lf_clearlock(lock);
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
break;
case F_GETLK:
error = lf_getlock(lock, fl, -1);
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
break;
case F_GETLKPID:
error = lf_getlock(lock, fl, fl->l_pid);
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
break;
default:
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
error = EINVAL;
break;
}
lf_move_blocked(lock, adjacent);
- FREE(adjacent, M_LOCKF);
+ kheap_free(KM_LOCKF, adjacent, sizeof(struct lockf));
continue;
}
/* If the lock starts adjacent to us, we can coalesce it */
lf_move_blocked(lock, adjacent);
- FREE(adjacent, M_LOCKF);
+ kheap_free(KM_LOCKF, adjacent, sizeof(struct lockf));
continue;
}
*/
if ((lock->lf_flags & F_WAIT) == 0) {
DTRACE_FSINFO(advlock__nowait, vnode_t, vp);
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
return EAGAIN;
}
LOCKF_DEBUG(LF_DBG_DEADLOCK, "lock %p which is me, so EDEADLK\n", lock);
proc_unlock(wproc);
lck_mtx_unlock(&lf_dead_lock);
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
return EDEADLK;
}
}
lock->lf_type == F_WRLCK) {
lock->lf_type = F_UNLCK;
if ((error = lf_clearlock(lock)) != 0) {
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
return error;
}
lock->lf_type = F_WRLCK;
if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
lf_wakelock(lock, TRUE);
}
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
/* Return ETIMEDOUT if timeout occurred. */
if (error == EWOULDBLOCK) {
error = ETIMEDOUT;
}
overlap->lf_type = lock->lf_type;
lf_move_blocked(overlap, lock);
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
lock = overlap; /* for lf_coalesce_adjacent() */
break;
*/
if (overlap->lf_type == lock->lf_type) {
lf_move_blocked(overlap, lock);
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
lock = overlap; /* for lf_coalesce_adjacent() */
break;
}
* resource shortage.
*/
if (lf_split(overlap, lock)) {
- FREE(lock, M_LOCKF);
+ kheap_free(KM_LOCKF, lock, sizeof(struct lockf));
return ENOLCK;
}
}
} else {
*prev = overlap->lf_next;
}
- FREE(overlap, M_LOCKF);
+ kheap_free(KM_LOCKF, overlap, sizeof(struct lockf));
continue;
case OVERLAP_STARTS_BEFORE_LOCK:
case OVERLAP_EQUALS_LOCK:
*prev = overlap->lf_next;
- FREE(overlap, M_LOCKF);
+ kheap_free(KM_LOCKF, overlap, sizeof(struct lockf));
break;
case OVERLAP_CONTAINS_LOCK: /* split it */
case OVERLAP_CONTAINED_BY_LOCK:
*prev = overlap->lf_next;
lf = overlap->lf_next;
- FREE(overlap, M_LOCKF);
+ kheap_free(KM_LOCKF, overlap, sizeof(struct lockf));
continue;
case OVERLAP_STARTS_BEFORE_LOCK:
* Make a new lock consisting of the last part of
* the encompassing lock
*/
- MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
+ splitlock = kheap_alloc(KM_LOCKF, sizeof(struct lockf), Z_WAITOK);
if (splitlock == NULL) {
return ENOLCK;
}
lock->lf_type == F_RDLCK ? "shared" :
lock->lf_type == F_WRLCK ? "exclusive" :
lock->lf_type == F_UNLCK ? "unlock" : "unknown",
- (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
+ (uint64_t)lock->lf_start, (uint64_t)lock->lf_end);
} else {
printf(" %s, start 0x%016llx, end 0x%016llx",
lock->lf_type == F_RDLCK ? "shared" :
lock->lf_type == F_WRLCK ? "exclusive" :
lock->lf_type == F_UNLCK ? "unlock" : "unknown",
- (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
+ (uint64_t)lock->lf_start, (uint64_t)lock->lf_end);
}
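+	/*
+	 * Note: the casts above switch from intmax_t to uint64_t so the
+	 * arguments match the 0x%016llx conversions (intmax_t would call
+	 * for %jx); lf_start and lf_end are 64-bit off_t values.
+	 */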
if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
lf->lf_type == F_RDLCK ? "shared" :
lf->lf_type == F_WRLCK ? "exclusive" :
lf->lf_type == F_UNLCK ? "unlock" :
- "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
+ "unknown", (uint64_t)lf->lf_start, (uint64_t)lf->lf_end);
TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
printf("\n\t\tlock request %p for ", (void *)blk);
if (blk->lf_flags & F_POSIX) {
blk->lf_type == F_RDLCK ? "shared" :
blk->lf_type == F_WRLCK ? "exclusive" :
blk->lf_type == F_UNLCK ? "unlock" :
- "unknown", (intmax_t)blk->lf_start,
- (intmax_t)blk->lf_end);
+ "unknown", (uint64_t)blk->lf_start,
+ (uint64_t)blk->lf_end);
if (!TAILQ_EMPTY(&blk->lf_blkhd)) {
panic("lf_printlist: bad list");
}