apple/xnu: bsd/vfs/vfs_bio.c (xnu-2050.22.13)
1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1994 Christopher G. Demetriou
31 * Copyright (c) 1982, 1986, 1989, 1993
32 * The Regents of the University of California. All rights reserved.
33 * (c) UNIX System Laboratories, Inc.
34 * All or some portions of this file are derived from material licensed
35 * to the University of California by American Telephone and Telegraph
36 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
37 * the permission of UNIX System Laboratories, Inc.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Berkeley and its contributors.
51 * 4. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE.
66 *
67 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
68 */
69
70 /*
71 * Some references:
72 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
73 * Leffler, et al.: The Design and Implementation of the 4.3BSD
74 * UNIX Operating System (Addison-Wesley, 1989)
75 */
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc_internal.h>
80 #include <sys/buf_internal.h>
81 #include <sys/vnode_internal.h>
82 #include <sys/mount_internal.h>
83 #include <sys/trace.h>
84 #include <sys/malloc.h>
85 #include <sys/resourcevar.h>
86 #include <miscfs/specfs/specdev.h>
87 #include <sys/ubc.h>
88 #include <sys/kauth.h>
89 #if DIAGNOSTIC
90 #include <kern/assert.h>
91 #endif /* DIAGNOSTIC */
92 #include <kern/task.h>
93 #include <kern/zalloc.h>
94 #include <kern/lock.h>
95
96 #include <sys/fslog.h> /* fslog_io_error() */
97
98 #include <mach/mach_types.h>
99 #include <mach/memory_object_types.h>
100 #include <kern/sched_prim.h> /* thread_block() */
101
102 #include <vm/vm_kern.h>
103 #include <vm/vm_pageout.h>
104
105 #include <sys/kdebug.h>
106
107 #include <libkern/OSAtomic.h>
108 #include <libkern/OSDebug.h>
109 #include <sys/ubc_internal.h>
110
111 #include <sys/sdt.h>
112 #include <sys/cprotect.h>
113
114
115 #if BALANCE_QUEUES
116 static __inline__ void bufqinc(int q);
117 static __inline__ void bufqdec(int q);
118 #endif
119
120 int bcleanbuf(buf_t bp, boolean_t discard);
121 static int brecover_data(buf_t bp);
122 static boolean_t incore(vnode_t vp, daddr64_t blkno);
123 /* timeout is in msecs */
124 static buf_t getnewbuf(int slpflag, int slptimeo, int *queue);
125 static void bremfree_locked(buf_t bp);
126 static void buf_reassign(buf_t bp, vnode_t newvp);
127 static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
128 static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
129 static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);
130 static boolean_t buffer_cache_gc(int);
131 static buf_t buf_brelse_shadow(buf_t bp);
132 static void buf_free_meta_store(buf_t bp);
133
134 static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
135 uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);
136
137
138 __private_extern__ int bdwrite_internal(buf_t, int);
139
140 /* zone allocated buffer headers */
141 static void bufzoneinit(void) __attribute__((section("__TEXT, initcode")));
142 static void bcleanbuf_thread_init(void) __attribute__((section("__TEXT, initcode")));
143 static void bcleanbuf_thread(void);
144
145 static zone_t buf_hdr_zone;
146 static int buf_hdr_count;
147
148
149 /*
150 * Definitions for the buffer hash lists.
151 */
152 #define BUFHASH(dvp, lbn) \
153 (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
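/*
 * BUFHASH folds the vnode pointer and the logical block number into an
 * index into bufhashtbl[]; bufhash is the power-of-two-minus-one mask
 * handed back by hashinit() in bufinit().  A lookup (with buf_mtxp held,
 * names here purely illustrative) would walk the resulting chain:
 *
 *	struct bufhashhdr *dp = BUFHASH(vp, blkno);
 *	buf_t bp = incore_locked(vp, blkno, dp);
 */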
154 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
155 u_long bufhash;
156
157 static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);
158
159 /* Definitions for the buffer stats. */
160 struct bufstats bufstats;
161
162 /* Number of delayed write buffers */
163 long nbdwrite = 0;
164 int blaundrycnt = 0;
165 static int boot_nbuf_headers = 0;
166
167 static TAILQ_HEAD(delayqueue, buf) delaybufqueue;
168
169 static TAILQ_HEAD(ioqueue, buf) iobufqueue;
170 static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
171 static int needbuffer;
172 static int need_iobuffer;
173
174 static lck_grp_t *buf_mtx_grp;
175 static lck_attr_t *buf_mtx_attr;
176 static lck_grp_attr_t *buf_mtx_grp_attr;
177 static lck_mtx_t *iobuffer_mtxp;
178 static lck_mtx_t *buf_mtxp;
179
180 static int buf_busycount;
181
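/*
 * buf_timestamp() returns the system uptime in whole seconds; buffers are
 * stamped with it when they are placed on a free list so that the
 * LRU/AGE/META staleness limits defined below can age them.
 */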
182 static __inline__ int
183 buf_timestamp(void)
184 {
185 struct timeval t;
186 microuptime(&t);
187 return (t.tv_sec);
188 }
189
190 /*
191 * Insq/Remq for the buffer free lists.
192 */
193 #if BALANCE_QUEUES
194 #define binsheadfree(bp, dp, whichq) do { \
195 TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
196 bufqinc((whichq)); \
197 } while (0)
198
199 #define binstailfree(bp, dp, whichq) do { \
200 TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
201 bufqinc((whichq)); \
202 } while (0)
203 #else
204 #define binsheadfree(bp, dp, whichq) do { \
205 TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
206 } while (0)
207
208 #define binstailfree(bp, dp, whichq) do { \
209 TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
210 } while (0)
211 #endif
212
213
214 #define BHASHENTCHECK(bp) \
215 if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
216 panic("%p: b_hash.le_prev is not deadbeef", (bp));
217
218 #define BLISTNONE(bp) \
219 (bp)->b_hash.le_next = (struct buf *)0; \
220 (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;
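/*
 * A b_hash.le_prev of 0xdeadbeef is the sentinel for "this header is not
 * on any hash chain": BLISTNONE() plants it, BHASHENTCHECK() asserts it is
 * present before an insert, and bremhash() panics if it sees the sentinel
 * where a live back-link is expected.
 */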
221
222 /*
223 * Insq/Remq for the vnode usage lists.
224 */
225 #define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
226 #define bufremvn(bp) { \
227 LIST_REMOVE(bp, b_vnbufs); \
228 (bp)->b_vnbufs.le_next = NOLIST; \
229 }
230
231 /*
232 * Time in seconds before a buffer on a list is
233 * considered a stale buffer
234 */
235 #define LRU_IS_STALE 120 /* default value for the LRU */
236 #define AGE_IS_STALE 60 /* default value for the AGE */
237 #define META_IS_STALE 180 /* default value for the BQ_META */
238
239 int lru_is_stale = LRU_IS_STALE;
240 int age_is_stale = AGE_IS_STALE;
241 int meta_is_stale = META_IS_STALE;
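/*
 * These limits are expressed in seconds of uptime.  A sketch (hypothetical,
 * the actual free-list scan lives elsewhere in this file) of how a buffer
 * on the LRU queue would be judged stale:
 *
 *	if ((buf_timestamp() - bp->b_timestamp) > lru_is_stale)
 *		;	// eligible to be reclaimed ahead of younger buffers
 */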
242
243 #define MAXLAUNDRY 10
244
245 /* LIST_INSERT_HEAD() with assertions */
246 static __inline__ void
247 blistenterhead(struct bufhashhdr * head, buf_t bp)
248 {
249 if ((bp->b_hash.le_next = (head)->lh_first) != NULL)
250 (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
251 (head)->lh_first = bp;
252 bp->b_hash.le_prev = &(head)->lh_first;
253 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
254 panic("blistenterhead: le_prev is deadbeef");
255 }
256
257 static __inline__ void
258 binshash(buf_t bp, struct bufhashhdr *dp)
259 {
260 #if DIAGNOSTIC
261 buf_t nbp;
262 #endif /* DIAGNOSTIC */
263
264 BHASHENTCHECK(bp);
265
266 #if DIAGNOSTIC
267 nbp = dp->lh_first;
268 for(; nbp != NULL; nbp = nbp->b_hash.le_next) {
269 if(nbp == bp)
270 panic("buf already in hashlist");
271 }
272 #endif /* DIAGNOSTIC */
273
274 blistenterhead(dp, bp);
275 }
276
277 static __inline__ void
278 bremhash(buf_t bp)
279 {
280 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
281 panic("bremhash le_prev is deadbeef");
282 if (bp->b_hash.le_next == bp)
283 panic("bremhash: next points to self");
284
285 if (bp->b_hash.le_next != NULL)
286 bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
287 *bp->b_hash.le_prev = (bp)->b_hash.le_next;
288 }
289
290 /*
291 * buf_mtxp held.
292 */
293 static __inline__ void
294 bmovelaundry(buf_t bp)
295 {
296 bp->b_whichq = BQ_LAUNDRY;
297 bp->b_timestamp = buf_timestamp();
298 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
299 blaundrycnt++;
300 }
301
302 static __inline__ void
303 buf_release_credentials(buf_t bp)
304 {
305 if (IS_VALID_CRED(bp->b_rcred)) {
306 kauth_cred_unref(&bp->b_rcred);
307 }
308 if (IS_VALID_CRED(bp->b_wcred)) {
309 kauth_cred_unref(&bp->b_wcred);
310 }
311 }
312
313
314 int
315 buf_valid(buf_t bp) {
316
317 if ( (bp->b_flags & (B_DONE | B_DELWRI)) )
318 return 1;
319 return 0;
320 }
321
322 int
323 buf_fromcache(buf_t bp) {
324
325 if ( (bp->b_flags & B_CACHE) )
326 return 1;
327 return 0;
328 }
329
330 void
331 buf_markinvalid(buf_t bp) {
332
333 SET(bp->b_flags, B_INVAL);
334 }
335
336 void
337 buf_markdelayed(buf_t bp) {
338
339 if (!ISSET(bp->b_flags, B_DELWRI)) {
340 SET(bp->b_flags, B_DELWRI);
341
342 OSAddAtomicLong(1, &nbdwrite);
343 buf_reassign(bp, bp->b_vp);
344 }
345 SET(bp->b_flags, B_DONE);
346 }
347
348 void
349 buf_markclean(buf_t bp) {
350
351 if (ISSET(bp->b_flags, B_DELWRI)) {
352 CLR(bp->b_flags, B_DELWRI);
353
354 OSAddAtomicLong(-1, &nbdwrite);
355 buf_reassign(bp, bp->b_vp);
356 }
357 }
358
359 void
360 buf_markeintr(buf_t bp) {
361
362 SET(bp->b_flags, B_EINTR);
363 }
364
365
366 void
367 buf_markaged(buf_t bp) {
368
369 SET(bp->b_flags, B_AGE);
370 }
371
372 int
373 buf_fua(buf_t bp) {
374
375 if ((bp->b_flags & B_FUA) == B_FUA)
376 return 1;
377 return 0;
378 }
379
380 void
381 buf_markfua(buf_t bp) {
382
383 SET(bp->b_flags, B_FUA);
384 }
385
386 #if CONFIG_PROTECT
387 void
388 buf_setcpaddr(buf_t bp, struct cprotect *entry) {
389 bp->b_attr.ba_cpentry = entry;
390 }
391
392 void
393 buf_setcpoff (buf_t bp, uint64_t foffset) {
394 bp->b_attr.ba_cp_file_off = foffset;
395 }
396
397 void *
398 bufattr_cpaddr(bufattr_t bap) {
399 return (bap->ba_cpentry);
400 }
401
402 uint64_t
403 bufattr_cpoff(bufattr_t bap) {
404 return (bap->ba_cp_file_off);
405 }
406
407 void
408 bufattr_setcpaddr(bufattr_t bap, void *cp_entry_addr) {
409 bap->ba_cpentry = cp_entry_addr;
410 }
411
412 void
413 bufattr_setcpoff(bufattr_t bap, uint64_t foffset) {
414 bap->ba_cp_file_off = foffset;
415 }
416
417 #else
418 void *
419 bufattr_cpaddr(bufattr_t bap __unused) {
420 return NULL;
421 }
422
423 uint64_t
424 bufattr_cpoff(bufattr_t bap __unused) {
425 return 0;
426 }
427
428 void
429 bufattr_setcpaddr(bufattr_t bap __unused, void *cp_entry_addr __unused) {
430 }
431
432 void
433 bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset) {
434 return;
435 }
436 #endif /* CONFIG_PROTECT */
437
438 bufattr_t
439 bufattr_alloc() {
440 bufattr_t bap;
441 MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
442 if (bap == NULL)
443 return NULL;
444
445 bzero(bap, sizeof(struct bufattr));
446 return bap;
447 }
448
449 void
450 bufattr_free(bufattr_t bap) {
451 if (bap)
452 FREE(bap, M_TEMP);
453 }
454
455 int
456 bufattr_rawencrypted(bufattr_t bap) {
457 if ( (bap->ba_flags & BA_RAW_ENCRYPTED_IO) )
458 return 1;
459 return 0;
460 }
461
462 int
463 bufattr_throttled(bufattr_t bap) {
464 if ( (bap->ba_flags & BA_THROTTLED_IO) )
465 return 1;
466 return 0;
467 }
468
469 int
470 bufattr_nocache(bufattr_t bap) {
471 if ( (bap->ba_flags & BA_NOCACHE) )
472 return 1;
473 return 0;
474 }
475
476 int
477 bufattr_meta(bufattr_t bap) {
478 if ( (bap->ba_flags & BA_META) )
479 return 1;
480 return 0;
481 }
482
483 int
484 #if !CONFIG_EMBEDDED
485 bufattr_delayidlesleep(bufattr_t bap)
486 #else /* !CONFIG_EMBEDDED */
487 bufattr_delayidlesleep(__unused bufattr_t bap)
488 #endif /* !CONFIG_EMBEDDED */
489 {
490 #if !CONFIG_EMBEDDED
491 if ( (bap->ba_flags & BA_DELAYIDLESLEEP) )
492 return 1;
493 #endif /* !CONFIG_EMBEDDED */
494 return 0;
495 }
496
497 bufattr_t
498 buf_attr(buf_t bp) {
499 return &bp->b_attr;
500 }
501
502 void
503 buf_markstatic(buf_t bp __unused) {
504 SET(bp->b_flags, B_STATICCONTENT);
505 }
506
507 int
508 buf_static(buf_t bp) {
509 if ( (bp->b_flags & B_STATICCONTENT) )
510 return 1;
511 return 0;
512 }
513
514 errno_t
515 buf_error(buf_t bp) {
516
517 return (bp->b_error);
518 }
519
520 void
521 buf_seterror(buf_t bp, errno_t error) {
522
523 if ((bp->b_error = error))
524 SET(bp->b_flags, B_ERROR);
525 else
526 CLR(bp->b_flags, B_ERROR);
527 }
528
529 void
530 buf_setflags(buf_t bp, int32_t flags) {
531
532 SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
533 }
534
535 void
536 buf_clearflags(buf_t bp, int32_t flags) {
537
538 CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
539 }
540
541 int32_t
542 buf_flags(buf_t bp) {
543
544 return ((bp->b_flags & BUF_X_RDFLAGS));
545 }
546
547 void
548 buf_reset(buf_t bp, int32_t io_flags) {
549
550 CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
551 SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));
552
553 bp->b_error = 0;
554 }
555
556 uint32_t
557 buf_count(buf_t bp) {
558
559 return (bp->b_bcount);
560 }
561
562 void
563 buf_setcount(buf_t bp, uint32_t bcount) {
564
565 bp->b_bcount = bcount;
566 }
567
568 uint32_t
569 buf_size(buf_t bp) {
570
571 return (bp->b_bufsize);
572 }
573
574 void
575 buf_setsize(buf_t bp, uint32_t bufsize) {
576
577 bp->b_bufsize = bufsize;
578 }
579
580 uint32_t
581 buf_resid(buf_t bp) {
582
583 return (bp->b_resid);
584 }
585
586 void
587 buf_setresid(buf_t bp, uint32_t resid) {
588
589 bp->b_resid = resid;
590 }
591
592 uint32_t
593 buf_dirtyoff(buf_t bp) {
594
595 return (bp->b_dirtyoff);
596 }
597
598 uint32_t
599 buf_dirtyend(buf_t bp) {
600
601 return (bp->b_dirtyend);
602 }
603
604 void
605 buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) {
606
607 bp->b_dirtyoff = dirtyoff;
608 }
609
610 void
611 buf_setdirtyend(buf_t bp, uint32_t dirtyend) {
612
613 bp->b_dirtyend = dirtyend;
614 }
615
616 uintptr_t
617 buf_dataptr(buf_t bp) {
618
619 return (bp->b_datap);
620 }
621
622 void
623 buf_setdataptr(buf_t bp, uintptr_t data) {
624
625 bp->b_datap = data;
626 }
627
628 vnode_t
629 buf_vnode(buf_t bp) {
630
631 return (bp->b_vp);
632 }
633
634 void
635 buf_setvnode(buf_t bp, vnode_t vp) {
636
637 bp->b_vp = vp;
638 }
639
640
641 void *
642 buf_callback(buf_t bp)
643 {
644 if ( !(bp->b_flags & B_CALL) )
645 return ((void *) NULL);
646
647 return ((void *)bp->b_iodone);
648 }
649
650
651 errno_t
652 buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
653 {
654 if (callback)
655 bp->b_flags |= (B_CALL | B_ASYNC);
656 else
657 bp->b_flags &= ~B_CALL;
658 bp->b_transaction = transaction;
659 bp->b_iodone = callback;
660
661 return (0);
662 }
663
664 errno_t
665 buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
666 {
667
668 if ( !(bp->b_lflags & BL_IOBUF) )
669 return (EINVAL);
670
671 if (upl)
672 bp->b_flags |= B_CLUSTER;
673 else
674 bp->b_flags &= ~B_CLUSTER;
675 bp->b_upl = upl;
676 bp->b_uploffset = offset;
677
678 return (0);
679 }
680
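/*
 * buf_clone() builds a child buf_t describing the byte range
 * [io_offset, io_offset + io_size) of an existing buffer so that the
 * sub-range can be issued as its own I/O.  For UPL-backed (B_CLUSTER)
 * buffers the sub-range must begin page-aligned within the UPL (or at
 * offset 0) and must end page-aligned unless it runs to the end of the
 * original buffer; otherwise NULL is returned.  The optional iodone/arg
 * pair is wired up just as in buf_setcallback().
 */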
681 buf_t
682 buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
683 {
684 buf_t io_bp;
685
686 if (io_offset < 0 || io_size < 0)
687 return (NULL);
688
689 if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount)
690 return (NULL);
691
692 if (bp->b_flags & B_CLUSTER) {
693 if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK))
694 return (NULL);
695
696 if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount))
697 return (NULL);
698 }
699 io_bp = alloc_io_buf(bp->b_vp, 0);
700
701 io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);
702
703 if (iodone) {
704 io_bp->b_transaction = arg;
705 io_bp->b_iodone = iodone;
706 io_bp->b_flags |= B_CALL;
707 }
708 if (bp->b_flags & B_CLUSTER) {
709 io_bp->b_upl = bp->b_upl;
710 io_bp->b_uploffset = bp->b_uploffset + io_offset;
711 } else {
712 io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
713 }
714 io_bp->b_bcount = io_size;
715
716 return (io_bp);
717 }
718
719
720 int
721 buf_shadow(buf_t bp)
722 {
723 if (bp->b_lflags & BL_SHADOW)
724 return 1;
725 return 0;
726 }
727
728
729 buf_t
730 buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
731 {
732 return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1));
733 }
734
735 buf_t
736 buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
737 {
738 return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0));
739 }
740
741
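/*
 * A shadow buffer is a second buf_t header layered over a meta buffer.
 * With force_copy == FALSE it is chained through b_shadow and counted in
 * b_shadow_ref on the original, and it either shares the original's data
 * or (BL_EXTERNAL) uses storage supplied by the caller.  With force_copy
 * the new buffer instead receives its own private copy of the data.
 */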
742 static buf_t
743 buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
744 {
745 buf_t io_bp;
746
747 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);
748
749 if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {
750
751 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
752 return (NULL);
753 }
754 #ifdef BUF_MAKE_PRIVATE
755 if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0)
756 panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
757 #endif
758 io_bp = alloc_io_buf(bp->b_vp, priv);
759
760 io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA);
761 io_bp->b_blkno = bp->b_blkno;
762 io_bp->b_lblkno = bp->b_lblkno;
763
764 if (iodone) {
765 io_bp->b_transaction = arg;
766 io_bp->b_iodone = iodone;
767 io_bp->b_flags |= B_CALL;
768 }
769 if (force_copy == FALSE) {
770 io_bp->b_bcount = bp->b_bcount;
771 io_bp->b_bufsize = bp->b_bufsize;
772
773 if (external_storage) {
774 io_bp->b_datap = external_storage;
775 #ifdef BUF_MAKE_PRIVATE
776 io_bp->b_data_store = NULL;
777 #endif
778 } else {
779 io_bp->b_datap = bp->b_datap;
780 #ifdef BUF_MAKE_PRIVATE
781 io_bp->b_data_store = bp;
782 #endif
783 }
784 *(buf_t *)(&io_bp->b_orig) = bp;
785
786 lck_mtx_lock_spin(buf_mtxp);
787
788 io_bp->b_lflags |= BL_SHADOW;
789 io_bp->b_shadow = bp->b_shadow;
790 bp->b_shadow = io_bp;
791 bp->b_shadow_ref++;
792
793 #ifdef BUF_MAKE_PRIVATE
794 if (external_storage)
795 io_bp->b_lflags |= BL_EXTERNAL;
796 else
797 bp->b_data_ref++;
798 #endif
799 lck_mtx_unlock(buf_mtxp);
800 } else {
801 if (external_storage) {
802 #ifdef BUF_MAKE_PRIVATE
803 io_bp->b_lflags |= BL_EXTERNAL;
804 #endif
805 io_bp->b_bcount = bp->b_bcount;
806 io_bp->b_bufsize = bp->b_bufsize;
807 io_bp->b_datap = external_storage;
808 } else {
809 allocbuf(io_bp, bp->b_bcount);
810
811 io_bp->b_lflags |= BL_IOBUF_ALLOC;
812 }
813 bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount);
814
815 #ifdef BUF_MAKE_PRIVATE
816 io_bp->b_data_store = NULL;
817 #endif
818 }
819 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);
820
821 return (io_bp);
822 }
823
824
825 #ifdef BUF_MAKE_PRIVATE
826 errno_t
827 buf_make_private(buf_t bp)
828 {
829 buf_t ds_bp;
830 buf_t t_bp;
831 struct buf my_buf;
832
833 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);
834
835 if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {
836
837 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
838 return (EINVAL);
839 }
840 my_buf.b_flags = B_META;
841 my_buf.b_datap = (uintptr_t)NULL;
842 allocbuf(&my_buf, bp->b_bcount);
843
844 bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount);
845
846 lck_mtx_lock_spin(buf_mtxp);
847
848 for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
849 		if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL))
850 break;
851 }
852 ds_bp = t_bp;
853
854 if (ds_bp == NULL && bp->b_data_ref)
855 panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL");
856
857 if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0))
858 panic("buf_make_private: ref_count == 0 && ds_bp != NULL");
859
860 if (ds_bp == NULL) {
861 lck_mtx_unlock(buf_mtxp);
862
863 buf_free_meta_store(&my_buf);
864
865 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
866 return (EINVAL);
867 }
868 for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
869 if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL))
870 t_bp->b_data_store = ds_bp;
871 }
872 ds_bp->b_data_ref = bp->b_data_ref;
873
874 bp->b_data_ref = 0;
875 bp->b_datap = my_buf.b_datap;
876
877 lck_mtx_unlock(buf_mtxp);
878
879 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
880 return (0);
881 }
882 #endif
883
884
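/*
 * buf_setfilter() installs (or, when filter is NULL, clears) an I/O
 * completion filter on the buffer and hands back the previously installed
 * iodone routine and transaction pointer so the caller can chain to them
 * from its own filter.
 */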
885 void
886 buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
887 void (**old_iodone)(buf_t, void *), void **old_transaction)
888 {
889 if (old_iodone)
890 *old_iodone = bp->b_iodone;
891 if (old_transaction)
892 *old_transaction = bp->b_transaction;
893
894 bp->b_transaction = transaction;
895 bp->b_iodone = filter;
896 if (filter)
897 bp->b_flags |= B_FILTER;
898 else
899 bp->b_flags &= ~B_FILTER;
900 }
901
902
903 daddr64_t
904 buf_blkno(buf_t bp) {
905
906 return (bp->b_blkno);
907 }
908
909 daddr64_t
910 buf_lblkno(buf_t bp) {
911
912 return (bp->b_lblkno);
913 }
914
915 void
916 buf_setblkno(buf_t bp, daddr64_t blkno) {
917
918 bp->b_blkno = blkno;
919 }
920
921 void
922 buf_setlblkno(buf_t bp, daddr64_t lblkno) {
923
924 bp->b_lblkno = lblkno;
925 }
926
927 dev_t
928 buf_device(buf_t bp) {
929
930 return (bp->b_dev);
931 }
932
933 errno_t
934 buf_setdevice(buf_t bp, vnode_t vp) {
935
936 if ((vp->v_type != VBLK) && (vp->v_type != VCHR))
937 return EINVAL;
938 bp->b_dev = vp->v_rdev;
939
940 return 0;
941 }
942
943
944 void *
945 buf_drvdata(buf_t bp) {
946
947 return (bp->b_drvdata);
948 }
949
950 void
951 buf_setdrvdata(buf_t bp, void *drvdata) {
952
953 bp->b_drvdata = drvdata;
954 }
955
956 void *
957 buf_fsprivate(buf_t bp) {
958
959 return (bp->b_fsprivate);
960 }
961
962 void
963 buf_setfsprivate(buf_t bp, void *fsprivate) {
964
965 bp->b_fsprivate = fsprivate;
966 }
967
968 kauth_cred_t
969 buf_rcred(buf_t bp) {
970
971 return (bp->b_rcred);
972 }
973
974 kauth_cred_t
975 buf_wcred(buf_t bp) {
976
977 return (bp->b_wcred);
978 }
979
980 void *
981 buf_upl(buf_t bp) {
982
983 return (bp->b_upl);
984 }
985
986 uint32_t
987 buf_uploffset(buf_t bp) {
988
989 return ((uint32_t)(bp->b_uploffset));
990 }
991
992 proc_t
993 buf_proc(buf_t bp) {
994
995 return (bp->b_proc);
996 }
997
998
999 errno_t
1000 buf_map(buf_t bp, caddr_t *io_addr)
1001 {
1002 buf_t real_bp;
1003 vm_offset_t vaddr;
1004 kern_return_t kret;
1005
1006 if ( !(bp->b_flags & B_CLUSTER)) {
1007 *io_addr = (caddr_t)bp->b_datap;
1008 return (0);
1009 }
1010 real_bp = (buf_t)(bp->b_real_bp);
1011
1012 if (real_bp && real_bp->b_datap) {
1013 /*
1014 * b_real_bp is only valid if B_CLUSTER is SET
1015 * if it's non-zero, then someone did a cluster_bp call
1016 * if the backing physical pages were already mapped
1017 * in before the call to cluster_bp (non-zero b_datap),
1018 * then we just use that mapping
1019 */
1020 *io_addr = (caddr_t)real_bp->b_datap;
1021 return (0);
1022 }
1023 kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */
1024
1025 if (kret != KERN_SUCCESS) {
1026 *io_addr = NULL;
1027
1028 return(ENOMEM);
1029 }
1030 vaddr += bp->b_uploffset;
1031
1032 *io_addr = (caddr_t)vaddr;
1033
1034 return (0);
1035 }
1036
1037 errno_t
1038 buf_unmap(buf_t bp)
1039 {
1040 buf_t real_bp;
1041 kern_return_t kret;
1042
1043 if ( !(bp->b_flags & B_CLUSTER))
1044 return (0);
1045 /*
1046 * see buf_map for the explanation
1047 */
1048 real_bp = (buf_t)(bp->b_real_bp);
1049
1050 if (real_bp && real_bp->b_datap)
1051 return (0);
1052
1053 if ((bp->b_lflags & BL_IOBUF) &&
1054 ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
1055 /*
1056 * ignore pageins... the 'right' thing will
1057 * happen due to the way we handle speculative
1058 * clusters...
1059 *
1060 * when we commit these pages, we'll hit
1061 * it with UPL_COMMIT_INACTIVE which
1062 * will clear the reference bit that got
1063 * turned on when we touched the mapping
1064 */
1065 bp->b_flags |= B_AGE;
1066 }
1067 kret = ubc_upl_unmap(bp->b_upl);
1068
1069 if (kret != KERN_SUCCESS)
1070 return (EINVAL);
1071 return (0);
1072 }
1073
1074
1075 void
1076 buf_clear(buf_t bp) {
1077 caddr_t baddr;
1078
1079 if (buf_map(bp, &baddr) == 0) {
1080 bzero(baddr, bp->b_bcount);
1081 buf_unmap(bp);
1082 }
1083 bp->b_resid = 0;
1084 }
1085
1086 /*
1087 * Read or write a buffer that is not contiguous on disk.
1088 * buffer is marked done/error at the conclusion
1089 */
1090 static int
1091 buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
1092 {
1093 vnode_t vp = buf_vnode(bp);
1094 buf_t io_bp; /* For reading or writing a single block */
1095 int io_direction;
1096 int io_resid;
1097 size_t io_contig_bytes;
1098 daddr64_t io_blkno;
1099 int error = 0;
1100 int bmap_flags;
1101
1102 /*
1103 * save our starting point... the bp was already mapped
1104 * in buf_strategy before we got called
1105 * no sense doing it again.
1106 */
1107 io_blkno = bp->b_blkno;
1108 /*
1109 * Make sure we redo this mapping for the next I/O
1110 * i.e. this can never be a 'permanent' mapping
1111 */
1112 bp->b_blkno = bp->b_lblkno;
1113
1114 /*
1115 * Get an io buffer to do the deblocking
1116 */
1117 io_bp = alloc_io_buf(devvp, 0);
1118
1119 io_bp->b_lblkno = bp->b_lblkno;
1120 io_bp->b_datap = bp->b_datap;
1121 io_resid = bp->b_bcount;
1122 io_direction = bp->b_flags & B_READ;
1123 io_contig_bytes = contig_bytes;
1124
1125 if (bp->b_flags & B_READ)
1126 bmap_flags = VNODE_READ;
1127 else
1128 bmap_flags = VNODE_WRITE;
1129
1130 for (;;) {
1131 if (io_blkno == -1)
1132 /*
1133 * this is unexpected, but we'll allow for it
1134 */
1135 bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
1136 else {
1137 io_bp->b_bcount = io_contig_bytes;
1138 io_bp->b_bufsize = io_contig_bytes;
1139 io_bp->b_resid = io_contig_bytes;
1140 io_bp->b_blkno = io_blkno;
1141
1142 buf_reset(io_bp, io_direction);
1143
1144 /*
1145 * Call the device to do the I/O and wait for it. Make sure the appropriate party is charged for the write.
1146 */
1147
1148 if (!ISSET(bp->b_flags, B_READ))
1149 OSAddAtomic(1, &devvp->v_numoutput);
1150
1151 if ((error = VNOP_STRATEGY(io_bp)))
1152 break;
1153 if ((error = (int)buf_biowait(io_bp)))
1154 break;
1155 if (io_bp->b_resid) {
1156 io_resid -= (io_contig_bytes - io_bp->b_resid);
1157 break;
1158 }
1159 }
1160 if ((io_resid -= io_contig_bytes) == 0)
1161 break;
1162 f_offset += io_contig_bytes;
1163 io_bp->b_datap += io_contig_bytes;
1164
1165 /*
1166 * Map the current position to a physical block number
1167 */
1168 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL)))
1169 break;
1170 }
1171 buf_free(io_bp);
1172
1173 if (error)
1174 buf_seterror(bp, error);
1175 bp->b_resid = io_resid;
1176 /*
1177 * This I/O is now complete
1178 */
1179 buf_biodone(bp);
1180
1181 return error;
1182 }
1183
1184
1185 /*
1186 * struct vnop_strategy_args {
1187 * struct buf *a_bp;
1188 * } *ap;
1189 */
1190 errno_t
1191 buf_strategy(vnode_t devvp, void *ap)
1192 {
1193 buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp;
1194 vnode_t vp = bp->b_vp;
1195 int bmap_flags;
1196 errno_t error;
1197 #if CONFIG_DTRACE
1198 int dtrace_io_start_flag = 0; /* We only want to trip the io:::start
1199 * probe once, with the true physical
1200 * block in place (b_blkno)
1201 */
1202
1203 #endif
1204
1205 if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK)
1206 panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
1207 /*
1208 * associate the physical device
1209 * with this buf_t even if we don't
1210 * end up issuing the I/O...
1211 */
1212 bp->b_dev = devvp->v_rdev;
1213
1214 if (bp->b_flags & B_READ)
1215 bmap_flags = VNODE_READ;
1216 else
1217 bmap_flags = VNODE_WRITE;
1218
1219 if ( !(bp->b_flags & B_CLUSTER)) {
1220
1221 if ( (bp->b_upl) ) {
1222 /*
1223 * we have a UPL associated with this bp
1224 * go through cluster_bp which knows how
1225 * to deal with filesystem block sizes
1226 * that aren't equal to the page size
1227 */
1228 DTRACE_IO1(start, buf_t, bp);
1229 return (cluster_bp(bp));
1230 }
1231 if (bp->b_blkno == bp->b_lblkno) {
1232 off_t f_offset;
1233 size_t contig_bytes;
1234
1235 if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
1236 DTRACE_IO1(start, buf_t, bp);
1237 buf_seterror(bp, error);
1238 buf_biodone(bp);
1239
1240 return (error);
1241 }
1242
1243 if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
1244 DTRACE_IO1(start, buf_t, bp);
1245 buf_seterror(bp, error);
1246 buf_biodone(bp);
1247
1248 return (error);
1249 }
1250
1251 DTRACE_IO1(start, buf_t, bp);
1252 #if CONFIG_DTRACE
1253 dtrace_io_start_flag = 1;
1254 #endif /* CONFIG_DTRACE */
1255
1256 if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
1257 /* Set block number to force biodone later */
1258 bp->b_blkno = -1;
1259 buf_clear(bp);
1260 }
1261 else if ((long)contig_bytes < bp->b_bcount) {
1262 return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes));
1263 }
1264 }
1265
1266 #if CONFIG_DTRACE
1267 if (dtrace_io_start_flag == 0) {
1268 DTRACE_IO1(start, buf_t, bp);
1269 dtrace_io_start_flag = 1;
1270 }
1271 #endif /* CONFIG_DTRACE */
1272
1273 if (bp->b_blkno == -1) {
1274 buf_biodone(bp);
1275 return (0);
1276 }
1277 }
1278
1279 #if CONFIG_DTRACE
1280 if (dtrace_io_start_flag == 0)
1281 DTRACE_IO1(start, buf_t, bp);
1282 #endif /* CONFIG_DTRACE */
1283
1284 #if CONFIG_PROTECT
1285 /* Capture f_offset in the bufattr*/
1286 if (bp->b_attr.ba_cpentry != 0) {
1287 /* No need to go here for older EAs */
1288 if(bp->b_attr.ba_cpentry->cp_flags & CP_OFF_IV_ENABLED) {
1289 off_t f_offset;
1290 if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset)))
1291 return error;
1292
1293 /*
1294 * Attach the file offset to this buffer. The
1295 * bufattr attributes will be passed down the stack
1296 * until they reach IOFlashStorage. IOFlashStorage
1297 * will retain the offset in a local variable when it
1298 * issues its I/Os to the NAND controller.
1299 *
1300 * Note that LwVM may end up splitting this I/O
1301 * into sub-I/Os if it crosses a chunk boundary. In this
1302 * case, LwVM will update this field when it dispatches
1303 * each I/O to IOFlashStorage. But from our perspective
1304 * we have only issued a single I/O.
1305 */
1306 bufattr_setcpoff (&(bp->b_attr), (u_int64_t)f_offset);
1307 }
1308 }
1309 #endif
1310
1311 /*
1312 * we can issue the I/O because...
1313 * either B_CLUSTER is set which
1314 * means that the I/O is properly set
1315 * up to be a multiple of the page size, or
1316 * we were able to successfully set up the
1317 * physical block mapping
1318 */
1319 return (VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap));
1320 }
1321
1322
1323
1324 buf_t
1325 buf_alloc(vnode_t vp)
1326 {
1327 return(alloc_io_buf(vp, 0));
1328 }
1329
1330 void
1331 buf_free(buf_t bp) {
1332
1333 free_io_buf(bp);
1334 }
1335
1336
1337 /*
1338 * iterate buffers for the specified vp.
1339 * if BUF_SCAN_DIRTY is set, do the dirty list
1340 * if BUF_SCAN_CLEAN is set, do the clean list
1341 * if neither flag is set, default to BUF_SCAN_DIRTY
1342 * if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages
1343 */
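/*
 * A minimal, purely hypothetical callout that pushes every dirty buffer
 * of a vnode to disk might look like this; it returns BUF_CLAIMED because
 * buf_bawrite() takes over responsibility for releasing the buffer once
 * the async write completes:
 *
 *	static int
 *	my_flush_callout(buf_t bp, __unused void *arg)
 *	{
 *		(void) buf_bawrite(bp);
 *		return (BUF_CLAIMED);
 *	}
 *
 *	buf_iterate(vp, my_flush_callout, BUF_SCAN_DIRTY, NULL);
 */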
1344
1345 struct buf_iterate_info_t {
1346 int flag;
1347 struct buflists *listhead;
1348 };
1349
1350 void
1351 buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg)
1352 {
1353 buf_t bp;
1354 int retval;
1355 struct buflists local_iterblkhd;
1356 int lock_flags = BAC_NOWAIT | BAC_REMOVE;
1357 int notify_busy = flags & BUF_NOTIFY_BUSY;
1358 struct buf_iterate_info_t list[2];
1359 int num_lists, i;
1360
1361 if (flags & BUF_SKIP_LOCKED)
1362 lock_flags |= BAC_SKIP_LOCKED;
1363 if (flags & BUF_SKIP_NONLOCKED)
1364 lock_flags |= BAC_SKIP_NONLOCKED;
1365
1366 if ( !(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN)))
1367 flags |= BUF_SCAN_DIRTY;
1368
1369 num_lists = 0;
1370
1371 if (flags & BUF_SCAN_DIRTY) {
1372 list[num_lists].flag = VBI_DIRTY;
1373 list[num_lists].listhead = &vp->v_dirtyblkhd;
1374 num_lists++;
1375 }
1376 if (flags & BUF_SCAN_CLEAN) {
1377 list[num_lists].flag = VBI_CLEAN;
1378 list[num_lists].listhead = &vp->v_cleanblkhd;
1379 num_lists++;
1380 }
1381
1382 for (i = 0; i < num_lists; i++) {
1383 lck_mtx_lock(buf_mtxp);
1384
1385 if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) {
1386 lck_mtx_unlock(buf_mtxp);
1387 continue;
1388 }
1389 while (!LIST_EMPTY(&local_iterblkhd)) {
1390 bp = LIST_FIRST(&local_iterblkhd);
1391 LIST_REMOVE(bp, b_vnbufs);
1392 LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs);
1393
1394 if (buf_acquire_locked(bp, lock_flags, 0, 0)) {
1395 if (notify_busy) {
1396 bp = NULL;
1397 } else {
1398 continue;
1399 }
1400 }
1401
1402 lck_mtx_unlock(buf_mtxp);
1403
1404 retval = callout(bp, arg);
1405
1406 switch (retval) {
1407 case BUF_RETURNED:
1408 if (bp)
1409 buf_brelse(bp);
1410 break;
1411 case BUF_CLAIMED:
1412 break;
1413 case BUF_RETURNED_DONE:
1414 if (bp)
1415 buf_brelse(bp);
1416 lck_mtx_lock(buf_mtxp);
1417 goto out;
1418 case BUF_CLAIMED_DONE:
1419 lck_mtx_lock(buf_mtxp);
1420 goto out;
1421 }
1422 lck_mtx_lock(buf_mtxp);
1423 } /* while list has more nodes */
1424 out:
1425 buf_itercomplete(vp, &local_iterblkhd, list[i].flag);
1426 lck_mtx_unlock(buf_mtxp);
1427 } /* for each list */
1428 } /* buf_iterate */
1429
1430
1431 /*
1432 * Flush out and invalidate all buffers associated with a vnode.
1433 */
1434 int
1435 buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
1436 {
1437 buf_t bp;
1438 int aflags;
1439 int error = 0;
1440 int must_rescan = 1;
1441 struct buflists local_iterblkhd;
1442
1443
1444 if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
1445 return (0);
1446
1447 lck_mtx_lock(buf_mtxp);
1448
1449 for (;;) {
1450 if (must_rescan == 0)
1451 /*
1452 * the lists may not be empty, but all that's left at this
1453 * point are metadata or B_LOCKED buffers which are being
1454 * skipped... we know this because we made it through both
1455 * the clean and dirty lists without dropping buf_mtxp...
1456 * each time we drop buf_mtxp we bump "must_rescan"
1457 */
1458 break;
1459 if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd))
1460 break;
1461 must_rescan = 0;
1462 /*
1463 * iterate the clean list
1464 */
1465 if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
1466 goto try_dirty_list;
1467 }
1468 while (!LIST_EMPTY(&local_iterblkhd)) {
1469
1470 bp = LIST_FIRST(&local_iterblkhd);
1471
1472 LIST_REMOVE(bp, b_vnbufs);
1473 LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
1474
1475 /*
1476 * some filesystems distinguish meta data blocks with a negative logical block #
1477 */
1478 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
1479 continue;
1480
1481 aflags = BAC_REMOVE;
1482
1483 if ( !(flags & BUF_INVALIDATE_LOCKED) )
1484 aflags |= BAC_SKIP_LOCKED;
1485
1486 if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
1487 if (error == EDEADLK)
1488 /*
1489 * this buffer was marked B_LOCKED...
1490 * we didn't drop buf_mtxp, so we
1491 * don't need to rescan
1492 */
1493 continue;
1494 if (error == EAGAIN) {
1495 /*
1496 * found a busy buffer... we blocked and
1497 * dropped buf_mtxp, so we're going to
1498 * need to rescan after this pass is completed
1499 */
1500 must_rescan++;
1501 continue;
1502 }
1503 /*
1504 * got some kind of 'real' error out of the msleep
1505 * in buf_acquire_locked, terminate the scan and return the error
1506 */
1507 buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
1508
1509 lck_mtx_unlock(buf_mtxp);
1510 return (error);
1511 }
1512 lck_mtx_unlock(buf_mtxp);
1513
1514 if (bp->b_flags & B_LOCKED)
1515 KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);
1516
1517 CLR(bp->b_flags, B_LOCKED);
1518 SET(bp->b_flags, B_INVAL);
1519 buf_brelse(bp);
1520
1521 lck_mtx_lock(buf_mtxp);
1522
1523 /*
1524 * by dropping buf_mtxp, we allow new
1525 * buffers to be added to the vnode list(s)
1526 * we'll have to rescan at least once more
1527 * if the queues aren't empty
1528 */
1529 must_rescan++;
1530 }
1531 buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
1532
1533 try_dirty_list:
1534 /*
1535 * Now iterate on dirty blks
1536 */
1537 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
1538 continue;
1539 }
1540 while (!LIST_EMPTY(&local_iterblkhd)) {
1541 bp = LIST_FIRST(&local_iterblkhd);
1542
1543 LIST_REMOVE(bp, b_vnbufs);
1544 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1545
1546 /*
1547 * some filesystems distinguish meta data blocks with a negative logical block #
1548 */
1549 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META)))
1550 continue;
1551
1552 aflags = BAC_REMOVE;
1553
1554 if ( !(flags & BUF_INVALIDATE_LOCKED) )
1555 aflags |= BAC_SKIP_LOCKED;
1556
1557 if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) {
1558 if (error == EDEADLK)
1559 /*
1560 * this buffer was marked B_LOCKED...
1561 * we didn't drop buf_mtxp, so we
1562 * don't need to rescan
1563 */
1564 continue;
1565 if (error == EAGAIN) {
1566 /*
1567 * found a busy buffer... we blocked and
1568 * dropped buf_mtxp, so we're going to
1569 * need to rescan after this pass is completed
1570 */
1571 must_rescan++;
1572 continue;
1573 }
1574 /*
1575 * got some kind of 'real' error out of the msleep
1576 * in buf_acquire_locked, terminate the scan and return the error
1577 */
1578 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1579
1580 lck_mtx_unlock(buf_mtxp);
1581 return (error);
1582 }
1583 lck_mtx_unlock(buf_mtxp);
1584
1585 if (bp->b_flags & B_LOCKED)
1586 KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);
1587
1588 CLR(bp->b_flags, B_LOCKED);
1589 SET(bp->b_flags, B_INVAL);
1590
1591 if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA))
1592 (void) VNOP_BWRITE(bp);
1593 else
1594 buf_brelse(bp);
1595
1596 lck_mtx_lock(buf_mtxp);
1597 /*
1598 * by dropping buf_mtxp, we allow new
1599 * buffers to be added to the vnode list(s)
1600 * we'll have to rescan at least once more
1601 * if the queues aren't empty
1602 */
1603 must_rescan++;
1604 }
1605 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1606 }
1607 lck_mtx_unlock(buf_mtxp);
1608
1609 return (0);
1610 }
1611
1612 void
1613 buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) {
1614
1615 (void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg);
1616 return;
1617 }
1618
1619 int
1620 buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg) {
1621 buf_t bp;
1622 int writes_issued = 0;
1623 errno_t error;
1624 int busy = 0;
1625 struct buflists local_iterblkhd;
1626 int lock_flags = BAC_NOWAIT | BAC_REMOVE;
1627 int any_locked = 0;
1628
1629 if (flags & BUF_SKIP_LOCKED)
1630 lock_flags |= BAC_SKIP_LOCKED;
1631 if (flags & BUF_SKIP_NONLOCKED)
1632 lock_flags |= BAC_SKIP_NONLOCKED;
1633 loop:
1634 lck_mtx_lock(buf_mtxp);
1635
1636 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
1637 while (!LIST_EMPTY(&local_iterblkhd)) {
1638 bp = LIST_FIRST(&local_iterblkhd);
1639 LIST_REMOVE(bp, b_vnbufs);
1640 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1641
1642 if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) {
1643 busy++;
1644 }
1645 if (error) {
1646 /*
1647 * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED,
1648 * we may want to do something differently if a locked or unlocked
1649 * buffer was encountered (depending on the arg specified).
1650 * In this case, we know that one of those two was set, and the
1651 * buf acquisition failed above.
1652 *
1653 * If it failed with EDEADLK, then save state which can be emitted
1654 * later on to the caller. Most callers should not care.
1655 */
1656 if (error == EDEADLK) {
1657 any_locked++;
1658 }
1659 continue;
1660 }
1661 lck_mtx_unlock(buf_mtxp);
1662
1663 bp->b_flags &= ~B_LOCKED;
1664
1665 /*
1666 * Wait for I/O associated with indirect blocks to complete,
1667 * since there is no way to quickly wait for them below.
1668 */
1669 if ((bp->b_vp == vp) || (wait == 0))
1670 (void) buf_bawrite(bp);
1671 else
1672 (void) VNOP_BWRITE(bp);
1673 writes_issued++;
1674
1675 lck_mtx_lock(buf_mtxp);
1676 }
1677 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1678 }
1679 lck_mtx_unlock(buf_mtxp);
1680
1681 if (wait) {
1682 (void)vnode_waitforwrites(vp, 0, 0, 0, msg);
1683
1684 if (vp->v_dirtyblkhd.lh_first && busy) {
1685 /*
1686 * we had one or more BUSY buffers on
1687 * the dirtyblock list... most likely
1688 * these are due to delayed writes that
1689 * were moved to the bclean queue but
1690 * have not yet been 'written'.
1691 * if we issued some writes on the
1692 * previous pass, we try again immediately
1693 * if we didn't, we'll sleep for some time
1694 * to allow the state to change...
1695 */
1696 if (writes_issued == 0) {
1697 (void)tsleep((caddr_t)&vp->v_numoutput,
1698 PRIBIO + 1, "vnode_flushdirtyblks", hz/20);
1699 }
1700 writes_issued = 0;
1701 busy = 0;
1702
1703 goto loop;
1704 }
1705 }
1706
1707 return any_locked;
1708 }
1709
1710
1711 /*
1712 * called with buf_mtxp held...
1713 * this lock protects the queue manipulation
1714 */
1715 static int
1716 buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags)
1717 {
1718 struct buflists * listheadp;
1719
1720 if (flags & VBI_DIRTY)
1721 listheadp = &vp->v_dirtyblkhd;
1722 else
1723 listheadp = &vp->v_cleanblkhd;
1724
1725 while (vp->v_iterblkflags & VBI_ITER) {
1726 vp->v_iterblkflags |= VBI_ITERWANT;
1727 msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL);
1728 }
1729 if (LIST_EMPTY(listheadp)) {
1730 LIST_INIT(iterheadp);
1731 return(EINVAL);
1732 }
1733 vp->v_iterblkflags |= VBI_ITER;
1734
1735 iterheadp->lh_first = listheadp->lh_first;
1736 listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
1737 LIST_INIT(listheadp);
1738
1739 return(0);
1740 }
1741
1742 /*
1743 * called with buf_mtxp held...
1744 * this lock protects the queue manipulation
1745 */
1746 static void
1747 buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags)
1748 {
1749 struct buflists * listheadp;
1750 buf_t bp;
1751
1752 if (flags & VBI_DIRTY)
1753 listheadp = &vp->v_dirtyblkhd;
1754 else
1755 listheadp = &vp->v_cleanblkhd;
1756
1757 while (!LIST_EMPTY(iterheadp)) {
1758 bp = LIST_FIRST(iterheadp);
1759 LIST_REMOVE(bp, b_vnbufs);
1760 LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
1761 }
1762 vp->v_iterblkflags &= ~VBI_ITER;
1763
1764 if (vp->v_iterblkflags & VBI_ITERWANT) {
1765 vp->v_iterblkflags &= ~VBI_ITERWANT;
1766 wakeup(&vp->v_iterblkflags);
1767 }
1768 }
1769
1770
1771 static void
1772 bremfree_locked(buf_t bp)
1773 {
1774 struct bqueues *dp = NULL;
1775 int whichq;
1776
1777 whichq = bp->b_whichq;
1778
1779 if (whichq == -1) {
1780 if (bp->b_shadow_ref == 0)
1781 panic("bremfree_locked: %p not on freelist", bp);
1782 /*
1783 * there are clones pointing to 'bp'...
1784 * therefore, it was not put on a freelist
1785 * when buf_brelse was last called on 'bp'
1786 */
1787 return;
1788 }
1789 /*
1790 * We only calculate the head of the freelist when removing
1791 * the last element of the list as that is the only time that
1792 * it is needed (e.g. to reset the tail pointer).
1793 *
1794 * NB: This makes an assumption about how tailq's are implemented.
1795 */
1796 if (bp->b_freelist.tqe_next == NULL) {
1797 dp = &bufqueues[whichq];
1798
1799 if (dp->tqh_last != &bp->b_freelist.tqe_next)
1800 panic("bremfree: lost tail");
1801 }
1802 TAILQ_REMOVE(dp, bp, b_freelist);
1803
1804 #if BALANCE_QUEUES
1805 bufqdec(whichq);
1806 #endif
1807 if (whichq == BQ_LAUNDRY)
1808 blaundrycnt--;
1809
1810 bp->b_whichq = -1;
1811 bp->b_timestamp = 0;
1812 bp->b_shadow = 0;
1813 }
1814
1815 /*
1816 * Associate a buffer with a vnode.
1817 * buf_mtxp must be locked on entry
1818 */
1819 static void
1820 bgetvp_locked(vnode_t vp, buf_t bp)
1821 {
1822
1823 if (bp->b_vp != vp)
1824 panic("bgetvp_locked: not free");
1825
1826 if (vp->v_type == VBLK || vp->v_type == VCHR)
1827 bp->b_dev = vp->v_rdev;
1828 else
1829 bp->b_dev = NODEV;
1830 /*
1831 * Insert onto list for new vnode.
1832 */
1833 bufinsvn(bp, &vp->v_cleanblkhd);
1834 }
1835
1836 /*
1837 * Disassociate a buffer from a vnode.
1838 * buf_mtxp must be locked on entry
1839 */
1840 static void
1841 brelvp_locked(buf_t bp)
1842 {
1843 /*
1844 * Delete from old vnode list, if on one.
1845 */
1846 if (bp->b_vnbufs.le_next != NOLIST)
1847 bufremvn(bp);
1848
1849 bp->b_vp = (vnode_t)NULL;
1850 }
1851
1852 /*
1853 * Reassign a buffer from one vnode to another.
1854 * Used to assign file specific control information
1855 * (indirect blocks) to the vnode to which they belong.
1856 */
1857 static void
1858 buf_reassign(buf_t bp, vnode_t newvp)
1859 {
1860 struct buflists *listheadp;
1861
1862 if (newvp == NULL) {
1863 printf("buf_reassign: NULL");
1864 return;
1865 }
1866 lck_mtx_lock_spin(buf_mtxp);
1867
1868 /*
1869 * Delete from old vnode list, if on one.
1870 */
1871 if (bp->b_vnbufs.le_next != NOLIST)
1872 bufremvn(bp);
1873 /*
1874 * If dirty, put on list of dirty buffers;
1875 * otherwise insert onto list of clean buffers.
1876 */
1877 if (ISSET(bp->b_flags, B_DELWRI))
1878 listheadp = &newvp->v_dirtyblkhd;
1879 else
1880 listheadp = &newvp->v_cleanblkhd;
1881 bufinsvn(bp, listheadp);
1882
1883 lck_mtx_unlock(buf_mtxp);
1884 }
1885
1886 static __inline__ void
1887 bufhdrinit(buf_t bp)
1888 {
1889 bzero((char *)bp, sizeof *bp);
1890 bp->b_dev = NODEV;
1891 bp->b_rcred = NOCRED;
1892 bp->b_wcred = NOCRED;
1893 bp->b_vnbufs.le_next = NOLIST;
1894 bp->b_flags = B_INVAL;
1895
1896 return;
1897 }
1898
1899 /*
1900 * Initialize buffers and hash links for buffers.
1901 */
1902 __private_extern__ void
1903 bufinit(void)
1904 {
1905 buf_t bp;
1906 struct bqueues *dp;
1907 int i;
1908
1909 nbuf_headers = 0;
1910 /* Initialize the buffer queues ('freelists') and the hash table */
1911 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
1912 TAILQ_INIT(dp);
1913 bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);
1914
1915 buf_busycount = 0;
1916
1917 /* Initialize the buffer headers */
1918 for (i = 0; i < max_nbuf_headers; i++) {
1919 nbuf_headers++;
1920 bp = &buf_headers[i];
1921 bufhdrinit(bp);
1922
1923 BLISTNONE(bp);
1924 dp = &bufqueues[BQ_EMPTY];
1925 bp->b_whichq = BQ_EMPTY;
1926 bp->b_timestamp = buf_timestamp();
1927 binsheadfree(bp, dp, BQ_EMPTY);
1928 binshash(bp, &invalhash);
1929 }
1930 boot_nbuf_headers = nbuf_headers;
1931
1932 TAILQ_INIT(&iobufqueue);
1933 TAILQ_INIT(&delaybufqueue);
1934
1935 for (; i < nbuf_headers + niobuf_headers; i++) {
1936 bp = &buf_headers[i];
1937 bufhdrinit(bp);
1938 bp->b_whichq = -1;
1939 binsheadfree(bp, &iobufqueue, -1);
1940 }
1941
1942 /*
1943 * allocate lock group attribute and group
1944 */
1945 buf_mtx_grp_attr = lck_grp_attr_alloc_init();
1946 buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr);
1947
1948 /*
1949 * allocate the lock attribute
1950 */
1951 buf_mtx_attr = lck_attr_alloc_init();
1952
1953 /*
1954 * allocate and initialize mutex's for the buffer and iobuffer pools
1955 */
1956 buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
1957 iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
1958
1959 if (iobuffer_mtxp == NULL)
1960 panic("couldn't create iobuffer mutex");
1961
1962 if (buf_mtxp == NULL)
1963 panic("couldn't create buf mutex");
1964
1965 /*
1966 * allocate and initialize cluster specific global locks...
1967 */
1968 cluster_init();
1969
1970 printf("using %d buffer headers and %d cluster IO buffer headers\n",
1971 nbuf_headers, niobuf_headers);
1972
1973 /* Set up zones used by the buffer cache */
1974 bufzoneinit();
1975
1976 /* start the bcleanbuf() thread */
1977 bcleanbuf_thread_init();
1978
1979 /* Register a callout for relieving vm pressure */
1980 if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
1981 panic("Couldn't register buffer cache callout for vm pressure!\n");
1982 }
1983
1984 #if BALANCE_QUEUES
1985 {
1986 static void bufq_balance_thread_init(void) __attribute__((section("__TEXT, initcode")));
1987 /* create a thread to do dynamic buffer queue balancing */
1988 bufq_balance_thread_init();
1989 }
1990 #endif /* BALANCE_QUEUES */
1991 }
1992
1993
1994
1995 /*
1996 * Zones for the meta data buffers
1997 */
1998
1999 #define MINMETA 512
2000 #define MAXMETA 8192
2001
2002 struct meta_zone_entry {
2003 zone_t mz_zone;
2004 vm_size_t mz_size;
2005 vm_size_t mz_max;
2006 const char *mz_name;
2007 };
2008
2009 struct meta_zone_entry meta_zones[] = {
2010 {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" },
2011 {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" },
2012 {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" },
2013 {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" },
2014 {NULL, (MINMETA * 16), 512 * (MINMETA * 16), "buf.8192" },
2015 {NULL, 0, 0, "" } /* End */
2016 };
2017
2018 /*
2019 * Initialize the meta data zones
2020 */
2021 static void
2022 bufzoneinit(void)
2023 {
2024 int i;
2025
2026 for (i = 0; meta_zones[i].mz_size != 0; i++) {
2027 meta_zones[i].mz_zone =
2028 zinit(meta_zones[i].mz_size,
2029 meta_zones[i].mz_max,
2030 PAGE_SIZE,
2031 meta_zones[i].mz_name);
2032 zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE);
2033 }
2034 buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers");
2035 zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE);
2036 }
2037
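/*
 * getbufzone() maps a metadata buffer size to the smallest zone in
 * meta_zones[] that can hold it: a 1024-byte request, for example, lands
 * in "buf.1024".  Sizes that are not a multiple of 512 or fall outside
 * [MINMETA, MAXMETA] panic.
 */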
2038 static __inline__ zone_t
2039 getbufzone(size_t size)
2040 {
2041 int i;
2042
2043 if ((size % 512) || (size < MINMETA) || (size > MAXMETA))
2044 panic("getbufzone: incorrect size = %lu", size);
2045
2046 for (i = 0; meta_zones[i].mz_size != 0; i++) {
2047 if (meta_zones[i].mz_size >= size)
2048 break;
2049 }
2050
2051 return (meta_zones[i].mz_zone);
2052 }
2053
2054
2055
2056 static struct buf *
2057 bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
2058 {
2059 buf_t bp;
2060
2061 bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
2062
2063 /*
2064 * If buffer does not have data valid, start a read.
2065 * Note that if buffer is B_INVAL, buf_getblk() won't return it.
2066 * Therefore, it's valid if its I/O has completed or been delayed.
2067 */
2068 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
2069 struct proc *p;
2070
2071 p = current_proc();
2072
2073 /* Start I/O for the buffer (keeping credentials). */
2074 SET(bp->b_flags, B_READ | async);
2075 if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
2076 kauth_cred_ref(cred);
2077 bp->b_rcred = cred;
2078 }
2079
2080 VNOP_STRATEGY(bp);
2081
2082 trace(TR_BREADMISS, pack(vp, size), blkno);
2083
2084 /* Pay for the read. */
2085 if (p && p->p_stats)
2086 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */
2087
2088 if (async) {
2089 /*
2090 * since we asked for an ASYNC I/O
2091 * the biodone will do the brelse
2092 * we don't want to pass back a bp
2093 * that we don't 'own'
2094 */
2095 bp = NULL;
2096 }
2097 } else if (async) {
2098 buf_brelse(bp);
2099 bp = NULL;
2100 }
2101
2102 trace(TR_BREADHIT, pack(vp, size), blkno);
2103
2104 return (bp);
2105 }
2106
2107 /*
2108 * Perform the reads for buf_breadn() and buf_meta_breadn().
2109 * Trivial modification to the breada algorithm presented in Bach (p.55).
2110 */
2111 static errno_t
2112 do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
2113 int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
2114 {
2115 buf_t bp;
2116 int i;
2117
2118 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
2119
2120 /*
2121 * For each of the read-ahead blocks, start a read, if necessary.
2122 */
2123 for (i = 0; i < nrablks; i++) {
2124 /* If it's in the cache, just go on to next one. */
2125 if (incore(vp, rablks[i]))
2126 continue;
2127
2128 /* Get a buffer for the read-ahead block */
2129 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
2130 }
2131
2132 /* Otherwise, we had to start a read for it; wait until it's valid. */
2133 return (buf_biowait(bp));
2134 }
2135
2136
2137 /*
2138 * Read a disk block.
2139 * This algorithm is described in Bach (p.54).
2140 */
2141 errno_t
2142 buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2143 {
2144 buf_t bp;
2145
2146 /* Get buffer for block. */
2147 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
2148
2149 /* Wait for the read to complete, and return result. */
2150 return (buf_biowait(bp));
2151 }
2152
2153 /*
2154 * Read a disk block. [bread() for meta-data]
2155 * This algorithm is described in Bach (p.54).
2156 */
2157 errno_t
2158 buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2159 {
2160 buf_t bp;
2161
2162 /* Get buffer for block. */
2163 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
2164
2165 /* Wait for the read to complete, and return result. */
2166 return (buf_biowait(bp));
2167 }
2168
2169 /*
2170 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2171 */
2172 errno_t
2173 buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2174 {
2175 return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ));
2176 }
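/*
 * A hypothetical caller that needs block 10 immediately and wants blocks
 * 11 and 12 pulled in asynchronously behind it could issue (block numbers
 * and sizes here are illustrative only):
 *
 *	daddr64_t ra_blks[]  = { 11, 12 };
 *	int       ra_sizes[] = { 4096, 4096 };
 *	buf_t     bp;
 *	errno_t   error;
 *
 *	error = buf_breadn(vp, 10, 4096, ra_blks, ra_sizes, 2,
 *	                   kauth_cred_get(), &bp);
 */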
2177
2178 /*
2179 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2180 * [buf_breadn() for meta-data]
2181 */
2182 errno_t
2183 buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2184 {
2185 return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META));
2186 }
2187
2188 /*
2189 * Block write. Described in Bach (p.56)
2190 */
2191 errno_t
2192 buf_bwrite(buf_t bp)
2193 {
2194 int sync, wasdelayed;
2195 errno_t rv;
2196 proc_t p = current_proc();
2197 vnode_t vp = bp->b_vp;
2198
2199 if (bp->b_datap == 0) {
2200 if (brecover_data(bp) == 0)
2201 return (0);
2202 }
2203 /* Remember buffer type, to switch on it later. */
2204 sync = !ISSET(bp->b_flags, B_ASYNC);
2205 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
2206 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
2207
2208 if (wasdelayed)
2209 OSAddAtomicLong(-1, &nbdwrite);
2210
2211 if (!sync) {
2212 /*
2213 * If not synchronous, pay for the I/O operation and make
2214 * sure the buf is on the correct vnode queue. We have
2215 * to do this now, because if we don't, the vnode may not
2216 * be properly notified that its I/O has completed.
2217 */
2218 if (wasdelayed)
2219 buf_reassign(bp, vp);
2220 else
2221 if (p && p->p_stats)
2222 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2223 }
2224 trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
2225
2226 /* Initiate disk write. Make sure the appropriate party is charged. */
2227
2228 OSAddAtomic(1, &vp->v_numoutput);
2229
2230 VNOP_STRATEGY(bp);
2231
2232 if (sync) {
2233 /*
2234 * If I/O was synchronous, wait for it to complete.
2235 */
2236 rv = buf_biowait(bp);
2237
2238 /*
2239 * Pay for the I/O operation, if it's not been paid for, and
2240 * make sure it's on the correct vnode queue. (async operations
2241 * were paid for above.)
2242 */
2243 if (wasdelayed)
2244 buf_reassign(bp, vp);
2245 else
2246 if (p && p->p_stats)
2247 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2248
2249 /* Release the buffer. */
2250 // XXXdbg - only if the unused bit is set
2251 if (!ISSET(bp->b_flags, B_NORELSE)) {
2252 buf_brelse(bp);
2253 } else {
2254 CLR(bp->b_flags, B_NORELSE);
2255 }
2256
2257 return (rv);
2258 } else {
2259 return (0);
2260 }
2261 }
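/*
 * Illustrative note: for a caller holding a busy buffer, a synchronous
 * buf_bwrite() both performs the I/O and (unless B_NORELSE is set) releases
 * the buffer, so the sketch below is the whole write path; the surrounding
 * names are hypothetical.  Callers wanting the asynchronous variant normally
 * go through buf_bawrite() rather than setting B_ASYNC themselves.
 *
 *	error = buf_bwrite(bp);		// waits for the I/O; bp already released
 *	if (error)
 *		...the device or filesystem reported a write failure...
 */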
2262
2263 int
2264 vn_bwrite(struct vnop_bwrite_args *ap)
2265 {
2266 return (buf_bwrite(ap->a_bp));
2267 }
2268
2269 /*
2270 * Delayed write.
2271 *
2272 * The buffer is marked dirty, but is not queued for I/O.
2273 * This routine should be used when the buffer is expected
2274 * to be modified again soon, typically a small write that
2275 * partially fills a buffer.
2276 *
2277 * NB: magnetic tapes cannot be delayed; they must be
2278 * written in the order that the writes are requested.
2279 *
2280 * Described in Leffler, et al. (pp. 208-213).
2281 *
2282 * Note: With the ability to allocate additional buffer
2283 * headers, we can get into a situation where "too" many
2284 * buf_bdwrite()s allow the kernel to create
2285 * buffers faster than the disks can service them. Doing a buf_bawrite() in
2286 * cases where we have "too many" outstanding buf_bdwrite()s avoids that.
2287 */
2288 __private_extern__ int
2289 bdwrite_internal(buf_t bp, int return_error)
2290 {
2291 proc_t p = current_proc();
2292 vnode_t vp = bp->b_vp;
2293
2294 /*
2295 * If the block hasn't been seen before:
2296 * (1) Mark it as having been seen,
2297 * (2) Charge for the write, and
2298 * (3) Make sure it's on its vnode's correct block list.
2299 */
2300 if (!ISSET(bp->b_flags, B_DELWRI)) {
2301 SET(bp->b_flags, B_DELWRI);
2302 if (p && p->p_stats)
2303 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2304 OSAddAtomicLong(1, &nbdwrite);
2305 buf_reassign(bp, vp);
2306 }
2307
2308 /*
2309 * if we're not LOCKED, but the total number of delayed writes
2310 * has climbed above 75% of the total buffers in the system,
2311 * return an error if the caller has indicated that it can
2312 * handle one in this case; otherwise schedule the I/O now.
2313 * this is done to prevent us from allocating tons of extra
2314 * buffers when dealing with virtual disks (i.e. DiskImages),
2315 * because additional buffers are dynamically allocated to prevent
2316 * deadlocks from occurring
2317 *
2318 * however, can't do a buf_bawrite() if the LOCKED bit is set because the
2319 * buffer is part of a transaction and can't go to disk until
2320 * the LOCKED bit is cleared.
2321 */
2322 if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers/4)*3)) {
2323 if (return_error)
2324 return (EAGAIN);
2325 /*
2326 * If the vnode has "too many" write operations in progress
2327 * wait for them to finish the IO
2328 */
2329 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite");
2330
2331 return (buf_bawrite(bp));
2332 }
2333
2334 /* Otherwise, the "write" is done, so mark and release the buffer. */
2335 SET(bp->b_flags, B_DONE);
2336 buf_brelse(bp);
2337 return (0);
2338 }
2339
2340 errno_t
2341 buf_bdwrite(buf_t bp)
2342 {
2343 return (bdwrite_internal(bp, 0));
2344 }
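/*
 * Illustrative sketch of the delayed-write pattern described above: read a
 * block, modify part of it, and let buf_bdwrite() mark it B_DELWRI rather
 * than issuing I/O immediately.  Names are hypothetical.
 *
 *	buf_t	bp;
 *	errno_t	error;
 *
 *	if ((error = buf_meta_bread(vp, blkno, blksize, cred, &bp))) {
 *		buf_brelse(bp);
 *		return (error);
 *	}
 *	// update a few bytes via (char *)buf_dataptr(bp)
 *	buf_bdwrite(bp);	// dirty and released; written later (or pushed
 *				// through buf_bawrite() if too many are pending)
 */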
2345
2346
2347 /*
2348 * Asynchronous block write; just an asynchronous buf_bwrite().
2349 *
2350 * Note: With the ability to allocate additional buffer
2351 * headers, we can get into a situation where "too" many
2352 * buf_bawrite()s allow the kernel to create
2353 * buffers faster than the disks can service them.
2354 * We limit the number of "in flight" writes a vnode can have to
2355 * avoid this.
2356 */
2357 static int
2358 bawrite_internal(buf_t bp, int throttle)
2359 {
2360 vnode_t vp = bp->b_vp;
2361
2362 if (vp) {
2363 if (throttle)
2364 /*
2365 * If the vnode has "too many" write operations in progress
2366 * wait for them to finish the IO
2367 */
2368 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
2369 else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE)
2370 /*
2371 * return to the caller and
2372 * let him decide what to do
2373 */
2374 return (EWOULDBLOCK);
2375 }
2376 SET(bp->b_flags, B_ASYNC);
2377
2378 return (VNOP_BWRITE(bp));
2379 }
2380
2381 errno_t
2382 buf_bawrite(buf_t bp)
2383 {
2384 return (bawrite_internal(bp, 1));
2385 }
2386
2387
2388
2389 static void
2390 buf_free_meta_store(buf_t bp)
2391 {
2392 if (bp->b_bufsize) {
2393 if (ISSET(bp->b_flags, B_ZALLOC)) {
2394 zone_t z;
2395
2396 z = getbufzone(bp->b_bufsize);
2397 zfree(z, (void *)bp->b_datap);
2398 } else
2399 kmem_free(kernel_map, bp->b_datap, bp->b_bufsize);
2400
2401 bp->b_datap = (uintptr_t)NULL;
2402 bp->b_bufsize = 0;
2403 }
2404 }
2405
2406
2407 static buf_t
2408 buf_brelse_shadow(buf_t bp)
2409 {
2410 buf_t bp_head;
2411 buf_t bp_temp;
2412 buf_t bp_return = NULL;
2413 #ifdef BUF_MAKE_PRIVATE
2414 buf_t bp_data;
2415 int data_ref = 0;
2416 #endif
2417 int need_wakeup = 0;
2418
2419 lck_mtx_lock_spin(buf_mtxp);
2420
2421 bp_head = (buf_t)bp->b_orig;
2422
2423 if (bp_head->b_whichq != -1)
2424 panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq);
2425
2426 #ifdef BUF_MAKE_PRIVATE
2427 if (bp_data = bp->b_data_store) {
2428 bp_data->b_data_ref--;
2429 /*
2430 * snapshot the ref count so that we can check it
2431 * outside of the lock... we only want the guy going
2432 * from 1 -> 0 to try and release the storage
2433 */
2434 data_ref = bp_data->b_data_ref;
2435 }
2436 #endif
2437 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
2438
2439 bp_head->b_shadow_ref--;
2440
2441 for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow);
2442
2443 if (bp_temp == NULL)
2444 panic("buf_brelse_shadow: bp not on list %p", bp_head);
2445
2446 bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
2447
2448 #ifdef BUF_MAKE_PRIVATE
2449 /*
2450 * we're about to free the current 'owner' of the data buffer and
2451 * there is at least one other shadow buf_t still pointing at it
2452 * so transfer it to the first shadow buf left in the chain
2453 */
2454 if (bp == bp_data && data_ref) {
2455 if ((bp_data = bp_head->b_shadow) == NULL)
2456 panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
2457
2458 for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow)
2459 bp_temp->b_data_store = bp_data;
2460 bp_data->b_data_ref = data_ref;
2461 }
2462 #endif
2463 if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow)
2464 panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
2465 if (bp_head->b_shadow_ref && bp_head->b_shadow == 0)
2466 panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
2467
2468 if (bp_head->b_shadow_ref == 0) {
2469 if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
2470
2471 CLR(bp_head->b_flags, B_AGE);
2472 bp_head->b_timestamp = buf_timestamp();
2473
2474 if (ISSET(bp_head->b_flags, B_LOCKED)) {
2475 bp_head->b_whichq = BQ_LOCKED;
2476 binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED);
2477 } else {
2478 bp_head->b_whichq = BQ_META;
2479 binstailfree(bp_head, &bufqueues[BQ_META], BQ_META);
2480 }
2481 } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) {
2482 CLR(bp_head->b_lflags, BL_WAITSHADOW);
2483
2484 bp_return = bp_head;
2485 }
2486 if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) {
2487 CLR(bp_head->b_lflags, BL_WANTED_REF);
2488 need_wakeup = 1;
2489 }
2490 }
2491 lck_mtx_unlock(buf_mtxp);
2492
2493 if (need_wakeup) {
2494 wakeup(bp_head);
2495 }
2496
2497 #ifdef BUF_MAKE_PRIVATE
2498 if (bp == bp_data && data_ref == 0)
2499 buf_free_meta_store(bp);
2500
2501 bp->b_data_store = NULL;
2502 #endif
2503 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
2504
2505 return (bp_return);
2506 }
2507
2508
2509 /*
2510 * Release a buffer on to the free lists.
2511 * Described in Bach (p. 46).
2512 */
2513 void
2514 buf_brelse(buf_t bp)
2515 {
2516 struct bqueues *bufq;
2517 long whichq;
2518 upl_t upl;
2519 int need_wakeup = 0;
2520 int need_bp_wakeup = 0;
2521
2522
2523 if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY))
2524 panic("buf_brelse: bad buffer = %p\n", bp);
2525
2526 #ifdef JOE_DEBUG
2527 (void) OSBacktrace(&bp->b_stackbrelse[0], 6);
2528
2529 bp->b_lastbrelse = current_thread();
2530 bp->b_tag = 0;
2531 #endif
2532 if (bp->b_lflags & BL_IOBUF) {
2533 buf_t shadow_master_bp = NULL;
2534
2535 if (ISSET(bp->b_lflags, BL_SHADOW))
2536 shadow_master_bp = buf_brelse_shadow(bp);
2537 else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC))
2538 buf_free_meta_store(bp);
2539 free_io_buf(bp);
2540
2541 if (shadow_master_bp) {
2542 bp = shadow_master_bp;
2543 goto finish_shadow_master;
2544 }
2545 return;
2546 }
2547
2548 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
2549 bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
2550 bp->b_flags, 0);
2551
2552 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2553
2554 /*
2555 * if we're invalidating a buffer that has the B_FILTER bit
2556 * set then call the b_iodone function so it gets cleaned
2557 * up properly.
2558 *
2559 * the HFS journal code depends on this
2560 */
2561 if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
2562 if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
2563 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
2564 void *arg = bp->b_transaction;
2565
2566 CLR(bp->b_flags, B_FILTER); /* but note callout done */
2567 bp->b_iodone = NULL;
2568 bp->b_transaction = NULL;
2569
2570 if (iodone_func == NULL) {
2571 panic("brelse: bp @ %p has NULL b_iodone!\n", bp);
2572 }
2573 (*iodone_func)(bp, arg);
2574 }
2575 }
2576 /*
2577 * I/O is done. Cleanup the UPL state
2578 */
2579 upl = bp->b_upl;
2580
2581 if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
2582 kern_return_t kret;
2583 int upl_flags;
2584
2585 if (upl == NULL) {
2586 if ( !ISSET(bp->b_flags, B_INVAL)) {
2587 kret = ubc_create_upl(bp->b_vp,
2588 ubc_blktooff(bp->b_vp, bp->b_lblkno),
2589 bp->b_bufsize,
2590 &upl,
2591 NULL,
2592 UPL_PRECIOUS);
2593
2594 if (kret != KERN_SUCCESS)
2595 panic("brelse: Failed to create UPL");
2596 #if UPL_DEBUG
2597 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
2598 #endif /* UPL_DEBUG */
2599 }
2600 } else {
2601 if (bp->b_datap) {
2602 kret = ubc_upl_unmap(upl);
2603
2604 if (kret != KERN_SUCCESS)
2605 panic("ubc_upl_unmap failed");
2606 bp->b_datap = (uintptr_t)NULL;
2607 }
2608 }
2609 if (upl) {
2610 if (bp->b_flags & (B_ERROR | B_INVAL)) {
2611 if (bp->b_flags & (B_READ | B_INVAL))
2612 upl_flags = UPL_ABORT_DUMP_PAGES;
2613 else
2614 upl_flags = 0;
2615
2616 ubc_upl_abort(upl, upl_flags);
2617 } else {
2618 if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY))
2619 upl_flags = UPL_COMMIT_SET_DIRTY ;
2620 else
2621 upl_flags = UPL_COMMIT_CLEAR_DIRTY ;
2622
2623 ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
2624 UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
2625 }
2626 bp->b_upl = NULL;
2627 }
2628 } else {
2629 if ( (upl) )
2630 panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
2631 }
2632
2633 /*
2634 * If it's locked, don't report an error; try again later.
2635 */
2636 if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
2637 CLR(bp->b_flags, B_ERROR);
2638 /*
2639 * If it's not cacheable, or an error, mark it invalid.
2640 */
2641 if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
2642 SET(bp->b_flags, B_INVAL);
2643
2644 if ((bp->b_bufsize <= 0) ||
2645 ISSET(bp->b_flags, B_INVAL) ||
2646 (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
2647
2648 boolean_t delayed_buf_free_meta_store = FALSE;
2649
2650 /*
2651 * If it's invalid or empty, dissociate it from its vnode,
2652 * release its storage if B_META, and
2653 * clean it up a bit and put it on the EMPTY queue
2654 */
2655 if (ISSET(bp->b_flags, B_DELWRI))
2656 OSAddAtomicLong(-1, &nbdwrite);
2657
2658 if (ISSET(bp->b_flags, B_META)) {
2659 if (bp->b_shadow_ref)
2660 delayed_buf_free_meta_store = TRUE;
2661 else
2662 buf_free_meta_store(bp);
2663 }
2664 /*
2665 * nuke any credentials we were holding
2666 */
2667 buf_release_credentials(bp);
2668
2669 lck_mtx_lock_spin(buf_mtxp);
2670
2671 if (bp->b_shadow_ref) {
2672 SET(bp->b_lflags, BL_WAITSHADOW);
2673
2674 lck_mtx_unlock(buf_mtxp);
2675
2676 return;
2677 }
2678 if (delayed_buf_free_meta_store == TRUE) {
2679
2680 lck_mtx_unlock(buf_mtxp);
2681 finish_shadow_master:
2682 buf_free_meta_store(bp);
2683
2684 lck_mtx_lock_spin(buf_mtxp);
2685 }
2686 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
2687
2688 if (bp->b_vp)
2689 brelvp_locked(bp);
2690
2691 bremhash(bp);
2692 BLISTNONE(bp);
2693 binshash(bp, &invalhash);
2694
2695 bp->b_whichq = BQ_EMPTY;
2696 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
2697 } else {
2698
2699 /*
2700 * It has valid data. Put it on the end of the appropriate
2701 * queue, so that it'll stick around for as long as possible.
2702 */
2703 if (ISSET(bp->b_flags, B_LOCKED))
2704 whichq = BQ_LOCKED; /* locked in core */
2705 else if (ISSET(bp->b_flags, B_META))
2706 whichq = BQ_META; /* meta-data */
2707 else if (ISSET(bp->b_flags, B_AGE))
2708 whichq = BQ_AGE; /* stale but valid data */
2709 else
2710 whichq = BQ_LRU; /* valid data */
2711 bufq = &bufqueues[whichq];
2712
2713 bp->b_timestamp = buf_timestamp();
2714
2715 lck_mtx_lock_spin(buf_mtxp);
2716
2717 /*
2718 * the buf_brelse_shadow routine doesn't take 'ownership'
2719 * of the parent buf_t... it updates state that is protected by
2720 * the buf_mtxp, and checks for BL_BUSY to determine whether to
2721 * put the buf_t back on a free list. b_shadow_ref is protected
2722 * by the lock, and since we have not yet cleared BL_BUSY, we need
2723 * to check it while holding the lock to ensure that one of us
2724 * puts this buf_t back on a free list when it is safe to do so
2725 */
2726 if (bp->b_shadow_ref == 0) {
2727 CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
2728 bp->b_whichq = whichq;
2729 binstailfree(bp, bufq, whichq);
2730 } else {
2731 /*
2732 * there are still cloned buf_t's pointing
2733 * at this guy... need to keep it off the
2734 * freelists until a buf_brelse is done on
2735 * the last clone
2736 */
2737 CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
2738 }
2739 }
2740 if (needbuffer) {
2741 /*
2742 * needbuffer is a global
2743 * we're currently using buf_mtxp to protect it
2744 * delay doing the actual wakeup until after
2745 * we drop buf_mtxp
2746 */
2747 needbuffer = 0;
2748 need_wakeup = 1;
2749 }
2750 if (ISSET(bp->b_lflags, BL_WANTED)) {
2751 /*
2752 * delay the actual wakeup until after we
2753 * clear BL_BUSY and we've dropped buf_mtxp
2754 */
2755 need_bp_wakeup = 1;
2756 }
2757 /*
2758 * Unlock the buffer.
2759 */
2760 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
2761 buf_busycount--;
2762
2763 lck_mtx_unlock(buf_mtxp);
2764
2765 if (need_wakeup) {
2766 /*
2767 * Wake up any processes waiting for any buffer to become free.
2768 */
2769 wakeup(&needbuffer);
2770 }
2771 if (need_bp_wakeup) {
2772 /*
2773 * Wake up any processes waiting for _this_ buffer to become free.
2774 */
2775 wakeup(bp);
2776 }
2777 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
2778 bp, bp->b_datap, bp->b_flags, 0, 0);
2779 }
2780
2781 /*
2782 * Determine if a block is in the cache.
2783 * Just look on what would be its hash chain. If it's there, return
2784 * a pointer to it, unless it's marked invalid. If it's marked invalid,
2785 * we normally don't return the buffer, unless the caller explicitly
2786 * wants us to.
2787 */
2788 static boolean_t
2789 incore(vnode_t vp, daddr64_t blkno)
2790 {
2791 boolean_t retval;
2792 struct bufhashhdr *dp;
2793
2794 dp = BUFHASH(vp, blkno);
2795
2796 lck_mtx_lock_spin(buf_mtxp);
2797
2798 if (incore_locked(vp, blkno, dp))
2799 retval = TRUE;
2800 else
2801 retval = FALSE;
2802 lck_mtx_unlock(buf_mtxp);
2803
2804 return (retval);
2805 }
2806
2807
2808 static buf_t
2809 incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp)
2810 {
2811 struct buf *bp;
2812
2813 /* Search hash chain */
2814 for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
2815 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
2816 !ISSET(bp->b_flags, B_INVAL)) {
2817 return (bp);
2818 }
2819 }
2820 return (NULL);
2821 }
2822
2823 void
2824 buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno)
2825 {
2826 buf_t bp;
2827 struct bufhashhdr *dp;
2828
2829 dp = BUFHASH(vp, blkno);
2830
2831 lck_mtx_lock_spin(buf_mtxp);
2832
2833 for (;;) {
2834 if ((bp = incore_locked(vp, blkno, dp)) == NULL)
2835 break;
2836
2837 if (bp->b_shadow_ref == 0)
2838 break;
2839
2840 SET(bp->b_lflags, BL_WANTED_REF);
2841
2842 (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO+1), "buf_wait_for_shadow", NULL);
2843 }
2844 lck_mtx_unlock(buf_mtxp);
2845 }
2846
2847 /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
2848 /*
2849 * Get a block of requested size that is associated with
2850 * a given vnode and block offset. If it is found in the
2851 * block cache, mark it as having been found, make it busy
2852 * and return it. Otherwise, return an empty block of the
2853 * correct size. It is up to the caller to ensure that the
2854 * cached blocks are of the correct size.
2855 */
2856 buf_t
2857 buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation)
2858 {
2859 buf_t bp;
2860 int err;
2861 upl_t upl;
2862 upl_page_info_t *pl;
2863 kern_return_t kret;
2864 int ret_only_valid;
2865 struct timespec ts;
2866 int upl_flags;
2867 struct bufhashhdr *dp;
2868
2869 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
2870 (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
2871
2872 ret_only_valid = operation & BLK_ONLYVALID;
2873 operation &= ~BLK_ONLYVALID;
2874 dp = BUFHASH(vp, blkno);
2875 start:
2876 lck_mtx_lock_spin(buf_mtxp);
2877
2878 if ((bp = incore_locked(vp, blkno, dp))) {
2879 /*
2880 * Found in the Buffer Cache
2881 */
2882 if (ISSET(bp->b_lflags, BL_BUSY)) {
2883 /*
2884 * but is busy
2885 */
2886 switch (operation) {
2887 case BLK_READ:
2888 case BLK_WRITE:
2889 case BLK_META:
2890 SET(bp->b_lflags, BL_WANTED);
2891 bufstats.bufs_busyincore++;
2892
2893 /*
2894 * don't retake the mutex after being awakened...
2895 * the time out is in msecs
2896 */
2897 ts.tv_sec = (slptimeo/1000);
2898 ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000;
2899
2900 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE,
2901 (uintptr_t)blkno, size, operation, 0, 0);
2902
2903 err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
2904
2905 /*
2906 * Callers who call with PCATCH or timeout are
2907 * willing to deal with the NULL pointer
2908 */
2909 if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo)))
2910 return (NULL);
2911 goto start;
2912 /*NOTREACHED*/
2913 break;
2914
2915 default:
2916 /*
2917 * unknown operation requested
2918 */
2919 panic("getblk: paging or unknown operation for incore busy buffer - %x\n", operation);
2920 /*NOTREACHED*/
2921 break;
2922 }
2923 } else {
2924 /*
2925 * buffer in core and not busy
2926 */
2927 SET(bp->b_lflags, BL_BUSY);
2928 SET(bp->b_flags, B_CACHE);
2929 buf_busycount++;
2930
2931 bremfree_locked(bp);
2932 bufstats.bufs_incore++;
2933
2934 lck_mtx_unlock(buf_mtxp);
2935 #ifdef JOE_DEBUG
2936 bp->b_owner = current_thread();
2937 bp->b_tag = 1;
2938 #endif
2939 if ( (bp->b_upl) )
2940 panic("buffer has UPL, but not marked BUSY: %p", bp);
2941
2942 if ( !ret_only_valid && bp->b_bufsize != size)
2943 allocbuf(bp, size);
2944
2945 upl_flags = 0;
2946 switch (operation) {
2947 case BLK_WRITE:
2948 /*
2949 * "write" operation: let the UPL subsystem
2950 * know that we intend to modify the buffer
2951 * cache pages we're gathering.
2952 */
2953 upl_flags |= UPL_WILL_MODIFY;
2954 case BLK_READ:
2955 upl_flags |= UPL_PRECIOUS;
2956 if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
2957 kret = ubc_create_upl(vp,
2958 ubc_blktooff(vp, bp->b_lblkno),
2959 bp->b_bufsize,
2960 &upl,
2961 &pl,
2962 upl_flags);
2963 if (kret != KERN_SUCCESS)
2964 panic("Failed to create UPL");
2965
2966 bp->b_upl = upl;
2967
2968 if (upl_valid_page(pl, 0)) {
2969 if (upl_dirty_page(pl, 0))
2970 SET(bp->b_flags, B_WASDIRTY);
2971 else
2972 CLR(bp->b_flags, B_WASDIRTY);
2973 } else
2974 CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
2975
2976 kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
2977
2978 if (kret != KERN_SUCCESS)
2979 panic("getblk: ubc_upl_map() failed with (%d)", kret);
2980 }
2981 break;
2982
2983 case BLK_META:
2984 /*
2985 * VM is not involved in IO for the meta data
2986 * buffer already has valid data
2987 */
2988 break;
2989
2990 default:
2991 panic("getblk: paging or unknown operation for incore buffer- %d\n", operation);
2992 /*NOTREACHED*/
2993 break;
2994 }
2995 }
2996 } else { /* not incore() */
2997 int queue = BQ_EMPTY; /* Start with no preference */
2998
2999 if (ret_only_valid) {
3000 lck_mtx_unlock(buf_mtxp);
3001 return (NULL);
3002 }
3003 if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/)
3004 operation = BLK_META;
3005
3006 if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL)
3007 goto start;
3008
3009 /*
3010 * getnewbuf may block for a number of different reasons...
3011 * if it does, it's then possible for someone else to
3012 * create a buffer for the same block and insert it into
3013 * the hash... if we see it incore at this point we dump
3014 * the buffer we were working on and start over
3015 */
3016 if (incore_locked(vp, blkno, dp)) {
3017 SET(bp->b_flags, B_INVAL);
3018 binshash(bp, &invalhash);
3019
3020 lck_mtx_unlock(buf_mtxp);
3021
3022 buf_brelse(bp);
3023 goto start;
3024 }
3025 /*
3026 * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN
3027 * CALLED! BE CAREFUL.
3028 */
3029
3030 /*
3031 * mark the buffer as B_META if indicated
3032 * so that when the buffer is released it will go to the META queue
3033 */
3034 if (operation == BLK_META)
3035 SET(bp->b_flags, B_META);
3036
3037 bp->b_blkno = bp->b_lblkno = blkno;
3038 bp->b_vp = vp;
3039
3040 /*
3041 * Insert in the hash so that incore() can find it
3042 */
3043 binshash(bp, BUFHASH(vp, blkno));
3044
3045 bgetvp_locked(vp, bp);
3046
3047 lck_mtx_unlock(buf_mtxp);
3048
3049 allocbuf(bp, size);
3050
3051 upl_flags = 0;
3052 switch (operation) {
3053 case BLK_META:
3054 /*
3055 * buffer data is invalid...
3056 *
3057 * I don't want to have to retake buf_mtxp,
3058 * so the miss and vmhits counters are done
3059 * with Atomic updates... all other counters
3060 * in bufstats are protected with either
3061 * buf_mtxp or iobuffer_mtxp
3062 */
3063 OSAddAtomicLong(1, &bufstats.bufs_miss);
3064 break;
3065
3066 case BLK_WRITE:
3067 /*
3068 * "write" operation: let the UPL subsystem know
3069 * that we intend to modify the buffer cache pages
3070 * we're gathering.
3071 */
3072 upl_flags |= UPL_WILL_MODIFY;
3073 case BLK_READ:
3074 { off_t f_offset;
3075 size_t contig_bytes;
3076 int bmap_flags;
3077
3078 if ( (bp->b_upl) )
3079 panic("bp already has UPL: %p",bp);
3080
3081 f_offset = ubc_blktooff(vp, blkno);
3082
3083 upl_flags |= UPL_PRECIOUS;
3084 kret = ubc_create_upl(vp,
3085 f_offset,
3086 bp->b_bufsize,
3087 &upl,
3088 &pl,
3089 upl_flags);
3090
3091 if (kret != KERN_SUCCESS)
3092 panic("Failed to create UPL");
3093 #if UPL_DEBUG
3094 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
3095 #endif /* UPL_DEBUG */
3096 bp->b_upl = upl;
3097
3098 if (upl_valid_page(pl, 0)) {
3099
3100 if (operation == BLK_READ)
3101 bmap_flags = VNODE_READ;
3102 else
3103 bmap_flags = VNODE_WRITE;
3104
3105 SET(bp->b_flags, B_CACHE | B_DONE);
3106
3107 OSAddAtomicLong(1, &bufstats.bufs_vmhits);
3108
3109 bp->b_validoff = 0;
3110 bp->b_dirtyoff = 0;
3111
3112 if (upl_dirty_page(pl, 0)) {
3113 /* page is dirty */
3114 SET(bp->b_flags, B_WASDIRTY);
3115
3116 bp->b_validend = bp->b_bcount;
3117 bp->b_dirtyend = bp->b_bcount;
3118 } else {
3119 /* page is clean */
3120 bp->b_validend = bp->b_bcount;
3121 bp->b_dirtyend = 0;
3122 }
3123 /*
3124 * try to recreate the physical block number associated with
3125 * this buffer...
3126 */
3127 if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))
3128 panic("getblk: VNOP_BLOCKMAP failed");
3129 /*
3130 * if the extent represented by this buffer
3131 * is not completely physically contiguous on
3132 * disk, then we can't cache the physical mapping
3133 * in the buffer header
3134 */
3135 if ((long)contig_bytes < bp->b_bcount)
3136 bp->b_blkno = bp->b_lblkno;
3137 } else {
3138 OSAddAtomicLong(1, &bufstats.bufs_miss);
3139 }
3140 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
3141
3142 if (kret != KERN_SUCCESS)
3143 panic("getblk: ubc_upl_map() failed with (%d)", kret);
3144 break;
3145 }
3146 default:
3147 panic("getblk: paging or unknown operation - %x", operation);
3148 /*NOTREACHED*/
3149 break;
3150 }
3151 }
3152 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
3153 bp, bp->b_datap, bp->b_flags, 3, 0);
3154
3155 #ifdef JOE_DEBUG
3156 (void) OSBacktrace(&bp->b_stackgetblk[0], 6);
3157 #endif
3158 return (bp);
3159 }
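/*
 * Illustrative sketch: peeking at a block with BLK_ONLYVALID.  With that
 * flag buf_getblk() returns NULL rather than allocating when the block is
 * not already cached; a non-NULL return is a busy buffer that must be
 * released.  Names are hypothetical.
 *
 *	buf_t	bp;
 *
 *	if ((bp = buf_getblk(vp, blkno, blksize, 0, 0, BLK_META | BLK_ONLYVALID))) {
 *		// block was resident; inspect it, then release it
 *		buf_brelse(bp);
 *	}
 */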
3160
3161 /*
3162 * Get an empty, disassociated buffer of given size.
3163 */
3164 buf_t
3165 buf_geteblk(int size)
3166 {
3167 buf_t bp = NULL;
3168 int queue = BQ_EMPTY;
3169
3170 do {
3171 lck_mtx_lock_spin(buf_mtxp);
3172
3173 bp = getnewbuf(0, 0, &queue);
3174 } while (bp == NULL);
3175
3176 SET(bp->b_flags, (B_META|B_INVAL));
3177
3178 #if DIAGNOSTIC
3179 assert(queue == BQ_EMPTY);
3180 #endif /* DIAGNOSTIC */
3181 /* XXX need to implement logic to deal with other queues */
3182
3183 binshash(bp, &invalhash);
3184 bufstats.bufs_eblk++;
3185
3186 lck_mtx_unlock(buf_mtxp);
3187
3188 allocbuf(bp, size);
3189
3190 return (bp);
3191 }
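/*
 * Illustrative sketch: buf_geteblk() hands back a busy, invalid (B_INVAL),
 * vnode-less buffer that can be used as scratch storage and then simply
 * released; because it is marked invalid, buf_brelse() recycles it onto the
 * empty queue.
 *
 *	buf_t	bp = buf_geteblk(8192);
 *	void	*scratch = (void *)buf_dataptr(bp);
 *	// ... use scratch ...
 *	buf_brelse(bp);
 */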
3192
3193 uint32_t
3194 buf_redundancy_flags(buf_t bp)
3195 {
3196 return bp->b_redundancy_flags;
3197 }
3198
3199 void
3200 buf_set_redundancy_flags(buf_t bp, uint32_t flags)
3201 {
3202 SET(bp->b_redundancy_flags, flags);
3203 }
3204
3205 void
3206 buf_clear_redundancy_flags(buf_t bp, uint32_t flags)
3207 {
3208 CLR(bp->b_redundancy_flags, flags);
3209 }
3210
3211 /*
3212 * With UBC, there is no need to expand / shrink the file data
3213 * buffer. The VM uses the same pages, hence no waste.
3214 * All the file data buffers can have one size.
3215 * In fact expand / shrink would be an expensive operation.
3216 *
3217 * Only exception to this is meta-data buffers. Most of the
3218 * meta data operations are smaller than PAGE_SIZE. Having the
3219 * meta-data buffers grow and shrink as needed, optimizes use
3220 * of the kernel wired memory.
3221 */
3222
3223 int
3224 allocbuf(buf_t bp, int size)
3225 {
3226 vm_size_t desired_size;
3227
3228 desired_size = roundup(size, CLBYTES);
3229
3230 if (desired_size < PAGE_SIZE)
3231 desired_size = PAGE_SIZE;
3232 if (desired_size > MAXBSIZE)
3233 panic("allocbuf: buffer larger than MAXBSIZE requested");
3234
3235 if (ISSET(bp->b_flags, B_META)) {
3236 zone_t zprev, z;
3237 int nsize = roundup(size, MINMETA);
3238
3239 if (bp->b_datap) {
3240 vm_offset_t elem = (vm_offset_t)bp->b_datap;
3241
3242 if (ISSET(bp->b_flags, B_ZALLOC)) {
3243 if (bp->b_bufsize < nsize) {
3244 /* reallocate to a bigger size */
3245
3246 zprev = getbufzone(bp->b_bufsize);
3247 if (nsize <= MAXMETA) {
3248 desired_size = nsize;
3249 z = getbufzone(nsize);
3250 /* b_datap not really a ptr */
3251 *(void **)(&bp->b_datap) = zalloc(z);
3252 } else {
3253 bp->b_datap = (uintptr_t)NULL;
3254 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
3255 CLR(bp->b_flags, B_ZALLOC);
3256 }
3257 bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3258 zfree(zprev, (void *)elem);
3259 } else {
3260 desired_size = bp->b_bufsize;
3261 }
3262
3263 } else {
3264 if ((vm_size_t)bp->b_bufsize < desired_size) {
3265 /* reallocate to a bigger size */
3266 bp->b_datap = (uintptr_t)NULL;
3267 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
3268 bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3269 kmem_free(kernel_map, elem, bp->b_bufsize);
3270 } else {
3271 desired_size = bp->b_bufsize;
3272 }
3273 }
3274 } else {
3275 /* new allocation */
3276 if (nsize <= MAXMETA) {
3277 desired_size = nsize;
3278 z = getbufzone(nsize);
3279 /* b_datap not really a ptr */
3280 *(void **)(&bp->b_datap) = zalloc(z);
3281 SET(bp->b_flags, B_ZALLOC);
3282 } else
3283 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size);
3284 }
3285
3286 if (bp->b_datap == 0)
3287 panic("allocbuf: NULL b_datap");
3288 }
3289 bp->b_bufsize = desired_size;
3290 bp->b_bcount = size;
3291
3292 return (0);
3293 }
3294
3295 /*
3296 * Get a new buffer from one of the free lists.
3297 *
3298 * A request for a queue is passed in. The queue from which the buffer
3299 * was taken is returned. Out of range queue requests get BQ_EMPTY. A request for
3300 * BQUEUE means no preference; use heuristics in that case.
3301 * The heuristic is as follows:
3302 * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
3303 * If none is available, block till one is made available.
3304 * If buffers available on both BQ_AGE and BQ_LRU, check the timestamps.
3305 * Pick the most stale buffer.
3306 * If found buffer was marked delayed write, start the async. write
3307 * and restart the search.
3308 * Initialize the fields and disassociate the buffer from the vnode.
3309 * Remove the buffer from the hash. Return the buffer and the queue
3310 * on which it was found.
3311 *
3312 * buf_mtxp is held upon entry
3313 * returns with buf_mtxp locked if new buf available
3314 * returns with buf_mtxp UNlocked if new buf NOT available
3315 */
3316
3317 static buf_t
3318 getnewbuf(int slpflag, int slptimeo, int * queue)
3319 {
3320 buf_t bp;
3321 buf_t lru_bp;
3322 buf_t age_bp;
3323 buf_t meta_bp;
3324 int age_time, lru_time, bp_time, meta_time;
3325 int req = *queue; /* save it for restarts */
3326 struct timespec ts;
3327
3328 start:
3329 /*
3330 * invalid request gets empty queue
3331 */
3332 if ((*queue >= BQUEUES) || (*queue < 0)
3333 || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED))
3334 *queue = BQ_EMPTY;
3335
3336
3337 if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first))
3338 goto found;
3339
3340 /*
3341 * need to grow number of bufs, add another one rather than recycling
3342 */
3343 if (nbuf_headers < max_nbuf_headers) {
3344 /*
3345 * Increment count now as lock
3346 * is dropped for allocation.
3347 * That avoids over commits
3348 */
3349 nbuf_headers++;
3350 goto add_newbufs;
3351 }
3352 /* Try for the requested queue first */
3353 bp = bufqueues[*queue].tqh_first;
3354 if (bp)
3355 goto found;
3356
3357 /* Unable to use requested queue */
3358 age_bp = bufqueues[BQ_AGE].tqh_first;
3359 lru_bp = bufqueues[BQ_LRU].tqh_first;
3360 meta_bp = bufqueues[BQ_META].tqh_first;
3361
3362 if (!age_bp && !lru_bp && !meta_bp) {
3363 /*
3364 * Unavailable on AGE or LRU or META queues
3365 * Try the empty list first
3366 */
3367 bp = bufqueues[BQ_EMPTY].tqh_first;
3368 if (bp) {
3369 *queue = BQ_EMPTY;
3370 goto found;
3371 }
3372 /*
3373 * We have seen that this is hard to trigger.
3374 * This is an overcommit of nbufs but needed
3375 * in some scenarios with disk images
3376 */
3377
3378 add_newbufs:
3379 lck_mtx_unlock(buf_mtxp);
3380
3381 /* Create a new temporary buffer header */
3382 bp = (struct buf *)zalloc(buf_hdr_zone);
3383
3384 if (bp) {
3385 bufhdrinit(bp);
3386 bp->b_whichq = BQ_EMPTY;
3387 bp->b_timestamp = buf_timestamp();
3388 BLISTNONE(bp);
3389 SET(bp->b_flags, B_HDRALLOC);
3390 *queue = BQ_EMPTY;
3391 }
3392 lck_mtx_lock_spin(buf_mtxp);
3393
3394 if (bp) {
3395 binshash(bp, &invalhash);
3396 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3397 buf_hdr_count++;
3398 goto found;
3399 }
3400 /* subtract already accounted bufcount */
3401 nbuf_headers--;
3402
3403 bufstats.bufs_sleeps++;
3404
3405 /* wait for a free buffer of any kind */
3406 needbuffer = 1;
3407 /* hz value is 100 */
3408 ts.tv_sec = (slptimeo/1000);
3409 /* the hz value is 100; which leads to 10ms */
3410 ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
3411
3412 msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO+1), "getnewbuf", &ts);
3413 return (NULL);
3414 }
3415
3416 /* Buffer available either on AGE or LRU or META */
3417 bp = NULL;
3418 *queue = -1;
3419
3420 /* Buffer available either on AGE or LRU */
3421 if (!age_bp) {
3422 bp = lru_bp;
3423 *queue = BQ_LRU;
3424 } else if (!lru_bp) {
3425 bp = age_bp;
3426 *queue = BQ_AGE;
3427 } else { /* buffer available on both AGE and LRU */
3428 int t = buf_timestamp();
3429
3430 age_time = t - age_bp->b_timestamp;
3431 lru_time = t - lru_bp->b_timestamp;
3432 if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
3433 bp = age_bp;
3434 *queue = BQ_AGE;
3435 /*
3436 * we should probably re-timestamp everything in the
3437 * queues at this point with the current time
3438 */
3439 } else {
3440 if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
3441 bp = lru_bp;
3442 *queue = BQ_LRU;
3443 } else {
3444 bp = age_bp;
3445 *queue = BQ_AGE;
3446 }
3447 }
3448 }
3449
3450 if (!bp) { /* Neither on AGE nor on LRU */
3451 bp = meta_bp;
3452 *queue = BQ_META;
3453 } else if (meta_bp) {
3454 int t = buf_timestamp();
3455
3456 bp_time = t - bp->b_timestamp;
3457 meta_time = t - meta_bp->b_timestamp;
3458
3459 if (!(bp_time < 0) && !(meta_time < 0)) {
3460 /* time not set backwards */
3461 int bp_is_stale;
3462 bp_is_stale = (*queue == BQ_LRU) ?
3463 lru_is_stale : age_is_stale;
3464
3465 if ((meta_time >= meta_is_stale) &&
3466 (bp_time < bp_is_stale)) {
3467 bp = meta_bp;
3468 *queue = BQ_META;
3469 }
3470 }
3471 }
3472 found:
3473 if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY))
3474 panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags);
3475
3476 /* Clean it */
3477 if (bcleanbuf(bp, FALSE)) {
3478 /*
3479 * moved to the laundry thread, buffer not ready
3480 */
3481 *queue = req;
3482 goto start;
3483 }
3484 return (bp);
3485 }
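/*
 * Worked example of the AGE/LRU choice above (staleness values assumed for
 * illustration only): with lru_is_stale = 120s and age_is_stale = 60s, an
 * LRU buffer idle for 130s beats an AGE buffer idle for 40s (stale LRU,
 * fresh AGE); in every other combination the AGE buffer is recycled first.
 */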
3486
3487
3488 /*
3489 * Clean a buffer.
3490 * Returns 0 if buffer is ready to use,
3491 * Returns 1 if issued a buf_bawrite() to indicate
3492 * that the buffer is not ready.
3493 *
3494 * buf_mtxp is held upon entry
3495 * returns with buf_mtxp locked
3496 */
3497 int
3498 bcleanbuf(buf_t bp, boolean_t discard)
3499 {
3500 /* Remove from the queue */
3501 bremfree_locked(bp);
3502
3503 #ifdef JOE_DEBUG
3504 bp->b_owner = current_thread();
3505 bp->b_tag = 2;
3506 #endif
3507 /*
3508 * If buffer was a delayed write, start the IO by queuing
3509 * it on the LAUNDRY queue, and return 1
3510 */
3511 if (ISSET(bp->b_flags, B_DELWRI)) {
3512 if (discard) {
3513 SET(bp->b_lflags, BL_WANTDEALLOC);
3514 }
3515
3516 bmovelaundry(bp);
3517
3518 lck_mtx_unlock(buf_mtxp);
3519
3520 wakeup(&bufqueues[BQ_LAUNDRY]);
3521 /*
3522 * and give it a chance to run
3523 */
3524 (void)thread_block(THREAD_CONTINUE_NULL);
3525
3526 lck_mtx_lock_spin(buf_mtxp);
3527
3528 return (1);
3529 }
3530 #ifdef JOE_DEBUG
3531 bp->b_owner = current_thread();
3532 bp->b_tag = 8;
3533 #endif
3534 /*
3535 * Buffer is no longer on any free list... we own it
3536 */
3537 SET(bp->b_lflags, BL_BUSY);
3538 buf_busycount++;
3539
3540 bremhash(bp);
3541
3542 /*
3543 * disassociate us from our vnode, if we had one...
3544 */
3545 if (bp->b_vp)
3546 brelvp_locked(bp);
3547
3548 lck_mtx_unlock(buf_mtxp);
3549
3550 BLISTNONE(bp);
3551
3552 if (ISSET(bp->b_flags, B_META))
3553 buf_free_meta_store(bp);
3554
3555 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
3556
3557 buf_release_credentials(bp);
3558
3559 /* If discarding, just move to the empty queue */
3560 if (discard) {
3561 lck_mtx_lock_spin(buf_mtxp);
3562 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
3563 bp->b_whichq = BQ_EMPTY;
3564 binshash(bp, &invalhash);
3565 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3566 CLR(bp->b_lflags, BL_BUSY);
3567 buf_busycount--;
3568 } else {
3569 /* Not discarding: clean up and prepare for reuse */
3570 bp->b_bufsize = 0;
3571 bp->b_datap = (uintptr_t)NULL;
3572 bp->b_upl = (void *)NULL;
3573 /*
3574 * preserve the state of whether this buffer
3575 * was allocated on the fly or not...
3576 * the only other flag that should be set at
3577 * this point is BL_BUSY...
3578 */
3579 #ifdef JOE_DEBUG
3580 bp->b_owner = current_thread();
3581 bp->b_tag = 3;
3582 #endif
3583 bp->b_lflags = BL_BUSY;
3584 bp->b_flags = (bp->b_flags & B_HDRALLOC);
3585 bp->b_dev = NODEV;
3586 bp->b_blkno = bp->b_lblkno = 0;
3587 bp->b_iodone = NULL;
3588 bp->b_error = 0;
3589 bp->b_resid = 0;
3590 bp->b_bcount = 0;
3591 bp->b_dirtyoff = bp->b_dirtyend = 0;
3592 bp->b_validoff = bp->b_validend = 0;
3593 bzero(&bp->b_attr, sizeof(struct bufattr));
3594
3595 lck_mtx_lock_spin(buf_mtxp);
3596 }
3597 return (0);
3598 }
3599
3600
3601
3602 errno_t
3603 buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
3604 {
3605 buf_t bp;
3606 errno_t error;
3607 struct bufhashhdr *dp;
3608
3609 dp = BUFHASH(vp, lblkno);
3610
3611 relook:
3612 lck_mtx_lock_spin(buf_mtxp);
3613
3614 if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
3615 lck_mtx_unlock(buf_mtxp);
3616 return (0);
3617 }
3618 if (ISSET(bp->b_lflags, BL_BUSY)) {
3619 if ( !ISSET(flags, BUF_WAIT)) {
3620 lck_mtx_unlock(buf_mtxp);
3621 return (EBUSY);
3622 }
3623 SET(bp->b_lflags, BL_WANTED);
3624
3625 error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
3626
3627 if (error) {
3628 return (error);
3629 }
3630 goto relook;
3631 }
3632 bremfree_locked(bp);
3633 SET(bp->b_lflags, BL_BUSY);
3634 SET(bp->b_flags, B_INVAL);
3635 buf_busycount++;
3636 #ifdef JOE_DEBUG
3637 bp->b_owner = current_thread();
3638 bp->b_tag = 4;
3639 #endif
3640 lck_mtx_unlock(buf_mtxp);
3641 buf_brelse(bp);
3642
3643 return (0);
3644 }
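/*
 * Illustrative sketch: discarding any cached copy of a logical block.
 * Without BUF_WAIT a busy buffer causes EBUSY; with it the call sleeps
 * until the buffer can be invalidated.
 *
 *	errno_t	error;
 *
 *	error = buf_invalblkno(vp, lblkno, BUF_WAIT);
 *	// error == EBUSY is only possible when BUF_WAIT was not passed
 */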
3645
3646
3647 void
3648 buf_drop(buf_t bp)
3649 {
3650 int need_wakeup = 0;
3651
3652 lck_mtx_lock_spin(buf_mtxp);
3653
3654 if (ISSET(bp->b_lflags, BL_WANTED)) {
3655 /*
3656 * delay the actual wakeup until after we
3657 * clear BL_BUSY and we've dropped buf_mtxp
3658 */
3659 need_wakeup = 1;
3660 }
3661 #ifdef JOE_DEBUG
3662 bp->b_owner = current_thread();
3663 bp->b_tag = 9;
3664 #endif
3665 /*
3666 * Unlock the buffer.
3667 */
3668 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
3669 buf_busycount--;
3670
3671 lck_mtx_unlock(buf_mtxp);
3672
3673 if (need_wakeup) {
3674 /*
3675 * Wake up any processes waiting for _this_ buffer to become free.
3676 */
3677 wakeup(bp);
3678 }
3679 }
3680
3681
3682 errno_t
3683 buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) {
3684 errno_t error;
3685
3686 lck_mtx_lock_spin(buf_mtxp);
3687
3688 error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
3689
3690 lck_mtx_unlock(buf_mtxp);
3691
3692 return (error);
3693 }
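/*
 * Illustrative sketch: taking explicit ownership of a specific buffer.
 * The flag meanings follow buf_acquire_locked() below; the retry loop is
 * hypothetical.
 *
 *	errno_t	error;
 *
 *	do {
 *		error = buf_acquire(bp, BAC_REMOVE, 0, 0);
 *	} while (error == EAGAIN);	// EAGAIN: we slept on BL_BUSY, so retry
 *	if (error == 0)
 *		...bp is now BL_BUSY and off the free lists...
 */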
3694
3695
3696 static errno_t
3697 buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
3698 {
3699 errno_t error;
3700 struct timespec ts;
3701
3702 if (ISSET(bp->b_flags, B_LOCKED)) {
3703 if ((flags & BAC_SKIP_LOCKED))
3704 return (EDEADLK);
3705 } else {
3706 if ((flags & BAC_SKIP_NONLOCKED))
3707 return (EDEADLK);
3708 }
3709 if (ISSET(bp->b_lflags, BL_BUSY)) {
3710 /*
3711 * since the lck_mtx_lock may block, the buffer
3712 * may become BUSY, so we need to
3713 * recheck for a NOWAIT request
3714 */
3715 if (flags & BAC_NOWAIT)
3716 return (EBUSY);
3717 SET(bp->b_lflags, BL_WANTED);
3718
3719 /* the hz value is 100; which leads to 10ms */
3720 ts.tv_sec = (slptimeo/100);
3721 ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
3722 error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
3723
3724 if (error)
3725 return (error);
3726 return (EAGAIN);
3727 }
3728 if (flags & BAC_REMOVE)
3729 bremfree_locked(bp);
3730 SET(bp->b_lflags, BL_BUSY);
3731 buf_busycount++;
3732
3733 #ifdef JOE_DEBUG
3734 bp->b_owner = current_thread();
3735 bp->b_tag = 5;
3736 #endif
3737 return (0);
3738 }
3739
3740
3741 /*
3742 * Wait for operations on the buffer to complete.
3743 * When they do, extract and return the I/O's error value.
3744 */
3745 errno_t
3746 buf_biowait(buf_t bp)
3747 {
3748 while (!ISSET(bp->b_flags, B_DONE)) {
3749
3750 lck_mtx_lock_spin(buf_mtxp);
3751
3752 if (!ISSET(bp->b_flags, B_DONE)) {
3753 DTRACE_IO1(wait__start, buf_t, bp);
3754 (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL);
3755 DTRACE_IO1(wait__done, buf_t, bp);
3756 } else
3757 lck_mtx_unlock(buf_mtxp);
3758 }
3759 /* check for interruption of I/O (e.g. via NFS), then errors. */
3760 if (ISSET(bp->b_flags, B_EINTR)) {
3761 CLR(bp->b_flags, B_EINTR);
3762 return (EINTR);
3763 } else if (ISSET(bp->b_flags, B_ERROR))
3764 return (bp->b_error ? bp->b_error : EIO);
3765 else
3766 return (0);
3767 }
3768
3769
3770 /*
3771 * Mark I/O complete on a buffer.
3772 *
3773 * If a callback has been requested, e.g. the pageout
3774 * daemon, do so. Otherwise, awaken waiting processes.
3775 *
3776 * [ Leffler, et al., says on p.247:
3777 * "This routine wakes up the blocked process, frees the buffer
3778 * for an asynchronous write, or, for a request by the pagedaemon
3779 * process, invokes a procedure specified in the buffer structure" ]
3780 *
3781 * In real life, the pagedaemon (or other system processes) wants
3782 * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
3783 * (for swap pager, that puts swap buffers on the free lists (!!!),
3784 * for the vn device, that puts malloc'd buffers on the free lists!)
3785 */
3786 extern struct timeval priority_IO_timestamp_for_root;
3787 extern int hard_throttle_on_root;
3788
3789 void
3790 buf_biodone(buf_t bp)
3791 {
3792 mount_t mp;
3793
3794 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
3795 bp, bp->b_datap, bp->b_flags, 0, 0);
3796
3797 if (ISSET(bp->b_flags, B_DONE))
3798 panic("biodone already");
3799
3800 if (ISSET(bp->b_flags, B_ERROR)) {
3801 fslog_io_error(bp);
3802 }
3803
3804 if (bp->b_vp && bp->b_vp->v_mount) {
3805 mp = bp->b_vp->v_mount;
3806 } else {
3807 mp = NULL;
3808 }
3809
3810 if (mp && (bp->b_flags & B_READ) == 0) {
3811 update_last_io_time(mp);
3812 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
3813 } else if (mp) {
3814 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size);
3815 }
3816
3817 if (kdebug_enable) {
3818 int code = DKIO_DONE;
3819
3820 if (bp->b_flags & B_READ)
3821 code |= DKIO_READ;
3822 if (bp->b_flags & B_ASYNC)
3823 code |= DKIO_ASYNC;
3824
3825 if (bp->b_flags & B_META)
3826 code |= DKIO_META;
3827 else if (bp->b_flags & B_PAGEIO)
3828 code |= DKIO_PAGING;
3829
3830 if (bp->b_flags & B_THROTTLED_IO)
3831 code |= DKIO_THROTTLE;
3832 else if (bp->b_flags & B_PASSIVE)
3833 code |= DKIO_PASSIVE;
3834
3835 if (bp->b_attr.ba_flags & BA_NOCACHE)
3836 code |= DKIO_NOCACHE;
3837
3838 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
3839 bp, (uintptr_t)bp->b_vp,
3840 bp->b_resid, bp->b_error, 0);
3841 }
3842 if ((bp->b_vp != NULLVP) &&
3843 ((bp->b_flags & (B_THROTTLED_IO | B_PASSIVE | B_IOSTREAMING | B_PAGEIO | B_READ | B_THROTTLED_IO | B_PASSIVE)) == (B_PAGEIO | B_READ)) &&
3844 (bp->b_vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV)) {
3845 microuptime(&priority_IO_timestamp_for_root);
3846 hard_throttle_on_root = 0;
3847 }
3848
3849 /*
3850 * I/O was done, so don't believe
3851 * the DIRTY state from VM anymore...
3852 * and we need to reset the THROTTLED/PASSIVE
3853 * indicators
3854 */
3855 CLR(bp->b_flags, (B_WASDIRTY | B_THROTTLED_IO | B_PASSIVE));
3856 CLR(bp->b_attr.ba_flags, (BA_META | BA_NOCACHE));
3857 #if !CONFIG_EMBEDDED
3858 CLR(bp->b_attr.ba_flags, (BA_THROTTLED_IO | BA_DELAYIDLESLEEP));
3859 #else
3860 CLR(bp->b_attr.ba_flags, BA_THROTTLED_IO);
3861 #endif /* !CONFIG_EMBEDDED */
3862 DTRACE_IO1(done, buf_t, bp);
3863
3864 if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
3865 /*
3866 * wake up any writers blocked
3867 * on throttle or waiting for I/O
3868 * to drain
3869 */
3870 vnode_writedone(bp->b_vp);
3871
3872 if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
3873 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
3874 void *arg = bp->b_transaction;
3875 int callout = ISSET(bp->b_flags, B_CALL);
3876
3877 if (iodone_func == NULL)
3878 panic("biodone: bp @ %p has NULL b_iodone!\n", bp);
3879
3880 CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
3881 bp->b_iodone = NULL;
3882 bp->b_transaction = NULL;
3883
3884 if (callout)
3885 SET(bp->b_flags, B_DONE); /* note that it's done */
3886
3887 (*iodone_func)(bp, arg);
3888
3889 if (callout) {
3890 /*
3891 * assumes that the callback function takes
3892 * ownership of the bp and deals with releasing it if necessary
3893 */
3894 goto biodone_done;
3895 }
3896 /*
3897 * in this case the call back function is acting
3898 * strictly as a filter... it does not take
3899 * ownership of the bp and is expecting us
3900 * to finish cleaning up... this is currently used
3901 * by the HFS journaling code
3902 */
3903 }
3904 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
3905 SET(bp->b_flags, B_DONE); /* note that it's done */
3906
3907 buf_brelse(bp);
3908 } else { /* or just wakeup the buffer */
3909 /*
3910 * by taking the mutex, we serialize
3911 * the buf owner calling buf_biowait so that we'll
3912 * only see him in one of 2 states...
3913 * state 1: B_DONE wasn't set and he's
3914 * blocked in msleep
3915 * state 2: he's blocked trying to take the
3916 * mutex before looking at B_DONE
3917 * BL_WANTED is cleared in case anyone else
3918 * is blocked waiting for the buffer... note
3919 * that we haven't cleared B_BUSY yet, so if
3920 * they do get to run, they're going to re-set
3921 * BL_WANTED and go back to sleep
3922 */
3923 lck_mtx_lock_spin(buf_mtxp);
3924
3925 CLR(bp->b_lflags, BL_WANTED);
3926 SET(bp->b_flags, B_DONE); /* note that it's done */
3927
3928 lck_mtx_unlock(buf_mtxp);
3929
3930 wakeup(bp);
3931 }
3932 biodone_done:
3933 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
3934 (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
3935 }
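/*
 * Illustrative sketch of the B_CALL completion path handled above, assuming
 * the buf_setcallback() KPI from <sys/buf.h>: a caller registers a callout
 * and issues the I/O asynchronously; buf_biodone() then invokes the callout
 * with B_DONE set, and the callout owns the buffer and must release it.
 * The handler name and context are hypothetical.
 *
 *	static void
 *	example_iodone(buf_t bp, void *arg)
 *	{
 *		// runs from buf_biodone(); check buf_error(bp), then release
 *		buf_brelse(bp);
 *	}
 *
 *	buf_setcallback(bp, example_iodone, ctx);	// sets B_CALL
 *	error = buf_bawrite(bp);			// completion runs example_iodone()
 */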
3936
3937 /*
3938 * Return a count of buffers on the "locked" queue.
3939 */
3940 int
3941 count_lock_queue(void)
3942 {
3943 buf_t bp;
3944 int n = 0;
3945
3946 lck_mtx_lock_spin(buf_mtxp);
3947
3948 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
3949 bp = bp->b_freelist.tqe_next)
3950 n++;
3951 lck_mtx_unlock(buf_mtxp);
3952
3953 return (n);
3954 }
3955
3956 /*
3957 * Return a count of 'busy' buffers. Used at the time of shutdown.
3958 * note: This is also called from the mach side in debug context in kdp.c
3959 */
3960 int
3961 count_busy_buffers(void)
3962 {
3963 return buf_busycount + bufstats.bufs_iobufinuse;
3964 }
3965
3966 #if DIAGNOSTIC
3967 /*
3968 * Print out statistics on the current allocation of the buffer pool.
3969 * Can be enabled to print out on every ``sync'' by setting "syncprt"
3970 * in vfs_syscalls.c using sysctl.
3971 */
3972 void
3973 vfs_bufstats()
3974 {
3975 int i, j, count;
3976 struct buf *bp;
3977 struct bqueues *dp;
3978 int counts[MAXBSIZE/CLBYTES+1];
3979 static char *bname[BQUEUES] =
3980 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
3981
3982 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
3983 count = 0;
3984 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
3985 counts[j] = 0;
3986
3987 lck_mtx_lock(buf_mtxp);
3988
3989 for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
3990 counts[bp->b_bufsize/CLBYTES]++;
3991 count++;
3992 }
3993 lck_mtx_unlock(buf_mtxp);
3994
3995 printf("%s: total-%d", bname[i], count);
3996 for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
3997 if (counts[j] != 0)
3998 printf(", %d-%d", j * CLBYTES, counts[j]);
3999 printf("\n");
4000 }
4001 }
4002 #endif /* DIAGNOSTIC */
4003
4004 #define NRESERVEDIOBUFS 128
4005
4006
4007 buf_t
4008 alloc_io_buf(vnode_t vp, int priv)
4009 {
4010 buf_t bp;
4011
4012 lck_mtx_lock_spin(iobuffer_mtxp);
4013
4014 while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) ||
4015 (bp = iobufqueue.tqh_first) == NULL) {
4016 bufstats.bufs_iobufsleeps++;
4017
4018 need_iobuffer = 1;
4019 (void) msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO+1), (const char *)"alloc_io_buf", NULL);
4020 }
4021 TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
4022
4023 bufstats.bufs_iobufinuse++;
4024 if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax)
4025 bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
4026
4027 lck_mtx_unlock(iobuffer_mtxp);
4028
4029 /*
4030 * initialize various fields
4031 * we don't need to hold the mutex since the buffer
4032 * is now private... the vp should have a reference
4033 * on it and is not protected by this mutex in any event
4034 */
4035 bp->b_timestamp = 0;
4036 bp->b_proc = NULL;
4037
4038 bp->b_datap = 0;
4039 bp->b_flags = 0;
4040 bp->b_lflags = BL_BUSY | BL_IOBUF;
4041 bp->b_redundancy_flags = 0;
4042 bp->b_blkno = bp->b_lblkno = 0;
4043 #ifdef JOE_DEBUG
4044 bp->b_owner = current_thread();
4045 bp->b_tag = 6;
4046 #endif
4047 bp->b_iodone = NULL;
4048 bp->b_error = 0;
4049 bp->b_resid = 0;
4050 bp->b_bcount = 0;
4051 bp->b_bufsize = 0;
4052 bp->b_upl = NULL;
4053 bp->b_vp = vp;
4054 bzero(&bp->b_attr, sizeof(struct bufattr));
4055
4056 if (vp && (vp->v_type == VBLK || vp->v_type == VCHR))
4057 bp->b_dev = vp->v_rdev;
4058 else
4059 bp->b_dev = NODEV;
4060
4061 return (bp);
4062 }
4063
4064
4065 void
4066 free_io_buf(buf_t bp)
4067 {
4068 int need_wakeup = 0;
4069
4070 /*
4071 * put buffer back on the head of the iobufqueue
4072 */
4073 bp->b_vp = NULL;
4074 bp->b_flags = B_INVAL;
4075
4076 lck_mtx_lock_spin(iobuffer_mtxp);
4077
4078 binsheadfree(bp, &iobufqueue, -1);
4079
4080 if (need_iobuffer) {
4081 /*
4082 * Wake up any processes waiting because they need an io buffer
4083 *
4084 * do the wakeup after we drop the mutex... it's possible that the
4085 * wakeup will be superfluous if need_iobuffer gets set again and
4086 * another thread runs this path, but it's highly unlikely, doesn't
4087 * hurt, and it means we don't hold up I/O progress if the wakeup blocks
4088 * trying to grab a task related lock...
4089 */
4090 need_iobuffer = 0;
4091 need_wakeup = 1;
4092 }
4093 if (bufstats.bufs_iobufinuse <= 0)
4094 panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp);
4095
4096 bufstats.bufs_iobufinuse--;
4097
4098 lck_mtx_unlock(iobuffer_mtxp);
4099
4100 if (need_wakeup)
4101 wakeup(&need_iobuffer);
4102 }
4103
4104
4105 void
4106 buf_list_lock(void)
4107 {
4108 lck_mtx_lock_spin(buf_mtxp);
4109 }
4110
4111 void
4112 buf_list_unlock(void)
4113 {
4114 lck_mtx_unlock(buf_mtxp);
4115 }
4116
4117 /*
4118 * If getnewbuf() calls bcleanbuf() on the same thread
4119 * there is a potential for stack overrun and deadlocks.
4120 * So we always hand off the work to a worker thread for completion
4121 */
4122
4123
4124 static void
4125 bcleanbuf_thread_init(void)
4126 {
4127 thread_t thread = THREAD_NULL;
4128
4129 /* create worker thread */
4130 kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread);
4131 thread_deallocate(thread);
4132 }
4133
4134 typedef int (*bcleanbufcontinuation)(int);
4135
4136 static void
4137 bcleanbuf_thread(void)
4138 {
4139 struct buf *bp;
4140 int error = 0;
4141 int loopcnt = 0;
4142
4143 for (;;) {
4144 lck_mtx_lock_spin(buf_mtxp);
4145
4146 while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
4147 (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO|PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
4148 }
4149
4150 /*
4151 * Remove from the queue
4152 */
4153 bremfree_locked(bp);
4154
4155 /*
4156 * Buffer is no longer on any free list
4157 */
4158 SET(bp->b_lflags, BL_BUSY);
4159 buf_busycount++;
4160
4161 #ifdef JOE_DEBUG
4162 bp->b_owner = current_thread();
4163 bp->b_tag = 10;
4164 #endif
4165
4166 lck_mtx_unlock(buf_mtxp);
4167 /*
4168 * do the IO
4169 */
4170 error = bawrite_internal(bp, 0);
4171
4172 if (error) {
4173 bp->b_whichq = BQ_LAUNDRY;
4174 bp->b_timestamp = buf_timestamp();
4175
4176 lck_mtx_lock_spin(buf_mtxp);
4177
4178 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
4179 blaundrycnt++;
4180
4181 /* we never leave a busy page on the laundry queue */
4182 CLR(bp->b_lflags, BL_BUSY);
4183 buf_busycount--;
4184 #ifdef JOE_DEBUG
4185 bp->b_owner = current_thread();
4186 bp->b_tag = 11;
4187 #endif
4188
4189 lck_mtx_unlock(buf_mtxp);
4190
4191 if (loopcnt > MAXLAUNDRY) {
4192 /*
4193 * bawrite_internal() can return errors if we're throttled. If we've
4194 * done several I/Os and failed, give the system some time to unthrottle
4195 * the vnode
4196 */
4197 (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1);
4198 loopcnt = 0;
4199 } else {
4200 /* give other threads a chance to run */
4201 (void)thread_block(THREAD_CONTINUE_NULL);
4202 loopcnt++;
4203 }
4204 }
4205 }
4206 }
4207
4208
4209 static int
4210 brecover_data(buf_t bp)
4211 {
4212 int upl_offset;
4213 upl_t upl;
4214 upl_page_info_t *pl;
4215 kern_return_t kret;
4216 vnode_t vp = bp->b_vp;
4217 int upl_flags;
4218
4219
4220 if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0)
4221 goto dump_buffer;
4222
4223 upl_flags = UPL_PRECIOUS;
4224 if (! (buf_flags(bp) & B_READ)) {
4225 /*
4226 * "write" operation: let the UPL subsystem know
4227 * that we intend to modify the buffer cache pages we're
4228 * gathering.
4229 */
4230 upl_flags |= UPL_WILL_MODIFY;
4231 }
4232
4233 kret = ubc_create_upl(vp,
4234 ubc_blktooff(vp, bp->b_lblkno),
4235 bp->b_bufsize,
4236 &upl,
4237 &pl,
4238 upl_flags);
4239 if (kret != KERN_SUCCESS)
4240 panic("Failed to create UPL");
4241
4242 for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
4243
4244 if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
4245 ubc_upl_abort(upl, 0);
4246 goto dump_buffer;
4247 }
4248 }
4249 bp->b_upl = upl;
4250
4251 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
4252
4253 if (kret != KERN_SUCCESS)
4254 panic("getblk: ubc_upl_map() failed with (%d)", kret);
4255 return (1);
4256
4257 dump_buffer:
4258 bp->b_bufsize = 0;
4259 SET(bp->b_flags, B_INVAL);
4260 buf_brelse(bp);
4261
4262 return(0);
4263 }
4264
4265 boolean_t
4266 buffer_cache_gc(int all)
4267 {
4268 buf_t bp;
4269 boolean_t did_large_zfree = FALSE;
4270 boolean_t need_wakeup = FALSE;
4271 int now = buf_timestamp();
4272 uint32_t found = 0;
4273 struct bqueues privq;
4274 int thresh_hold = BUF_STALE_THRESHHOLD;
4275
4276 if (all)
4277 thresh_hold = 0;
4278 /*
4279 * We only care about metadata (incore storage comes from zalloc()).
4280 * Unless "all" is set (used to evict meta data buffers in preparation
4281 * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers
4282 * that have not been accessed in the last 30s. This limit controls both
4283 * the hold time of the global lock "buf_mtxp" and the length of time
4284 * we spend compute bound in the GC thread which calls this function
4285 */
4286 lck_mtx_lock(buf_mtxp);
4287
4288 do {
4289 found = 0;
4290 TAILQ_INIT(&privq);
4291 need_wakeup = FALSE;
4292
4293 while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
4294 (now > bp->b_timestamp) &&
4295 (now - bp->b_timestamp > thresh_hold) &&
4296 (found < BUF_MAX_GC_BATCH_SIZE)) {
4297
4298 /* Remove from free list */
4299 bremfree_locked(bp);
4300 found++;
4301
4302 #ifdef JOE_DEBUG
4303 bp->b_owner = current_thread();
4304 bp->b_tag = 12;
4305 #endif
4306
4307 /* If dirty, move to laundry queue and remember to do wakeup */
4308 if (ISSET(bp->b_flags, B_DELWRI)) {
4309 SET(bp->b_lflags, BL_WANTDEALLOC);
4310
4311 bmovelaundry(bp);
4312 need_wakeup = TRUE;
4313
4314 continue;
4315 }
4316
4317 /*
4318 * Mark busy and put on private list. We could technically get
4319 * away without setting BL_BUSY here.
4320 */
4321 SET(bp->b_lflags, BL_BUSY);
4322 buf_busycount++;
4323
4324 /*
4325 * Remove from hash and dissociate from vp.
4326 */
4327 bremhash(bp);
4328 if (bp->b_vp) {
4329 brelvp_locked(bp);
4330 }
4331
4332 TAILQ_INSERT_TAIL(&privq, bp, b_freelist);
4333 }
4334
4335 if (found == 0) {
4336 break;
4337 }
4338
4339 /* Drop lock for batch processing */
4340 lck_mtx_unlock(buf_mtxp);
4341
4342 /* Wakeup and yield for laundry if need be */
4343 if (need_wakeup) {
4344 wakeup(&bufqueues[BQ_LAUNDRY]);
4345 (void)thread_block(THREAD_CONTINUE_NULL);
4346 }
4347
4348 /* Clean up every buffer on private list */
4349 TAILQ_FOREACH(bp, &privq, b_freelist) {
4350 /* Take note if we've definitely freed at least a page to a zone */
4351 if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
4352 did_large_zfree = TRUE;
4353 }
4354
4355 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
4356
4357 /* Free Storage */
4358 buf_free_meta_store(bp);
4359
4360 /* Release credentials */
4361 buf_release_credentials(bp);
4362
4363 /* Prepare for moving to empty queue */
4364 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
4365 | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
4366 bp->b_whichq = BQ_EMPTY;
4367 BLISTNONE(bp);
4368 }
4369 lck_mtx_lock(buf_mtxp);
4370
4371 /* Back under lock, move them all to invalid hash and clear busy */
4372 TAILQ_FOREACH(bp, &privq, b_freelist) {
4373 binshash(bp, &invalhash);
4374 CLR(bp->b_lflags, BL_BUSY);
4375 buf_busycount--;
4376
4377 #ifdef JOE_DEBUG
4378 if (bp->b_owner != current_thread()) {
4379 panic("Buffer stolen from buffer_cache_gc()");
4380 }
4381 bp->b_owner = current_thread();
4382 bp->b_tag = 13;
4383 #endif
4384 }
4385
4386 /* And do a big bulk move to the empty queue */
4387 TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist);
4388
4389 } while (all && (found == BUF_MAX_GC_BATCH_SIZE));
4390
4391 lck_mtx_unlock(buf_mtxp);
4392
4393 return did_large_zfree;
4394 }
4395
4396
4397 /*
4398 * disabled for now
4399 */
4400
4401 #if FLUSH_QUEUES
4402
4403 #define NFLUSH 32
4404
4405 static int
4406 bp_cmp(void *a, void *b)
4407 {
4408 buf_t *bp_a = *(buf_t **)a,
4409 *bp_b = *(buf_t **)b;
4410 daddr64_t res;
4411
4412 // don't have to worry about negative block
4413 // numbers so this is ok to do.
4414 //
4415 res = (bp_a->b_blkno - bp_b->b_blkno);
4416
4417 return (int)res;
4418 }
4419
4420
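/*
 * bflushq:
 * Collect delayed-write buffers belonging to mount 'mp' from the given
 * free queue, in batches of up to NFLUSH, sort each batch by block number
 * to favor sequential I/O, and issue asynchronous writes.  Returns the
 * number of writes issued.
 */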
4421 int
4422 bflushq(int whichq, mount_t mp)
4423 {
4424 buf_t bp, next;
4425 int i, buf_count;
4426 int total_writes = 0;
4427 static buf_t flush_table[NFLUSH];
4428
4429 if (whichq < 0 || whichq >= BQUEUES) {
4430 return (0);
4431 }
4432
4433 restart:
4434 lck_mtx_lock(buf_mtxp);
4435
4436 bp = TAILQ_FIRST(&bufqueues[whichq]);
4437
4438 for (buf_count = 0; bp; bp = next) {
4439 next = bp->b_freelist.tqe_next;
4440
4441 if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
4442 continue;
4443 }
4444
4445 if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
4446
4447 bremfree_locked(bp);
4448 #ifdef JOE_DEBUG
4449 bp->b_owner = current_thread();
4450 bp->b_tag = 7;
4451 #endif
4452 SET(bp->b_lflags, BL_BUSY);
4453 buf_busycount++;
4454
4455 flush_table[buf_count] = bp;
4456 buf_count++;
4457 total_writes++;
4458
4459 if (buf_count >= NFLUSH) {
4460 lck_mtx_unlock(buf_mtxp);
4461
4462 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
4463
4464 for (i = 0; i < buf_count; i++) {
4465 buf_bawrite(flush_table[i]);
4466 }
4467 goto restart;
4468 }
4469 }
4470 }
4471 lck_mtx_unlock(buf_mtxp);
4472
4473 if (buf_count > 0) {
4474 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
4475
4476 for (i = 0; i < buf_count; i++) {
4477 buf_bawrite(flush_table[i]);
4478 }
4479 }
4480
4481 return (total_writes);
4482 }
4483 #endif
4484
4485
4486 #if BALANCE_QUEUES
4487
4488 /* XXX move this to a separate file */
4489
4490 /*
4491 * NOTE: THIS CODE HAS NOT BEEN UPDATED
4492 * WITH RESPECT TO THE NEW LOCKING MODEL
4493 */
4494
4495
4496 /*
4497 * Dynamic Scaling of the Buffer Queues
4498 */
4499
4500 typedef long long blsize_t;
4501
4502 blsize_t MAXNBUF; /* initialize to (sane_size / PAGE_SIZE) */
4503 /* Global tunable limits */
4504 blsize_t nbufh; /* number of buffer headers */
4505 blsize_t nbuflow; /* minimum number of buffer headers required */
4506 blsize_t nbufhigh; /* maximum number of buffer headers allowed */
4507 blsize_t nbuftarget; /* preferred number of buffer headers */
4508
4509 /*
4510 * assertions:
4511 *
4512 * 1. 0 < nbuflow <= nbufh <= nbufhigh
4513 * 2. nbufhigh <= MAXNBUF
4514 * 3. 0 < nbuflow <= nbuftarget <= nbufhigh
4515 * 4. nbufh can not be set by sysctl().
4516 */
4517
4518 /* Per queue tunable limits */
4519
4520 struct bufqlim {
4521 blsize_t bl_nlow; /* minimum number of buffer headers required */
4522 blsize_t bl_num; /* number of buffer headers on the queue */
4523 blsize_t bl_nlhigh; /* maximum number of buffer headers allowed */
4524 blsize_t bl_target; /* preferred number of buffer headers */
4525 long bl_stale; /* Seconds after which a buffer is considered stale */
4526 } bufqlim[BQUEUES];
4527
4528 /*
4529 * assertions:
4530 *
4531 * 1. 0 <= bl_nlow <= bl_num <= bl_nlhigh
4532 * 2. bl_nlhigh <= MAXNBUF
4533 * 3. bufqlim[BQ_META].bl_nlow != 0
4534 * 4. bufqlim[BQ_META].bl_nlow > (number of possible concurrent
4535 * file system IO operations)
4536 * 5. bl_num can not be set by sysctl().
4537 * 6. bl_nlhigh <= nbufhigh
4538 */
4539
4540 /*
4541 * Rationale:
4542 * ----------
4543 * Defining blsize_t as long would permit 2^31 buffer headers per queue,
4544 * which can describe (2^31 * PAGE_SIZE) memory per queue.
4545 *
4546 * These limits are exported by means of sysctl().
4547 * It was decided to define blsize_t as a 64 bit quantity.
4548 * This will make sure that we will not be required to change it
4549 * as long as we do not exceed 64 bit address space for the kernel.
4550 *
4551 * The low and high limits are initialized at compile time,
4552 * and boot arguments can be used to override them. sysctl()
4553 * does not change them; sysctl() can read all of the values
4554 * but can set only the target. num is the current level.
4555 *
4556 * Advantages of having a "bufqscan" thread doing the balancing are:
4557 * Keep enough bufs on BQ_EMPTY.
4558 * getnewbuf() by default will always select a buffer from the BQ_EMPTY.
4559 * getnewbuf() performs best if a buffer was found there.
4560 * Also this minimizes the possibility of starting IO
4561 * from getnewbuf(). That's a performance win, too.
4562 *
4563 * Localize complex logic [balancing as well as time aging]
4564 * to balancebufq().
4565 *
4566 * Simplify getnewbuf() logic by elimination of time aging code.
4567 */
4568
4569 /*
4570 * Algorithm:
4571 * -----------
4572 * The goal of the dynamic scaling of the buffer queues is to keep
4573 * the size of the LRU close to bl_target. Buffers on a queue would
4574 * be time aged.
4575 *
4576 * There would be a thread which will be responsible for "balancing"
4577 * the buffer cache queues.
4578 *
4579 * The scan order would be: AGE, LRU, META, EMPTY.
4580 */
4581
4582 long bufqscanwait = 0;
4583
4584 static void bufqscan_thread();
4585 static int balancebufq(int q);
4586 static int btrimempty(int n);
4587 static __inline__ int initbufqscan(void);
4588 static __inline__ int nextbufq(int q);
4589 static void buqlimprt(int all);
4590
4591
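/* Maintain the per-queue buffer header counts used by the balancing logic */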
4592 static __inline__ void
4593 bufqinc(int q)
4594 {
4595 if ((q < 0) || (q >= BQUEUES))
4596 return;
4597
4598 bufqlim[q].bl_num++;
4599 return;
4600 }
4601
4602 static __inline__ void
4603 bufqdec(int q)
4604 {
4605 if ((q < 0) || (q >= BQUEUES))
4606 return;
4607
4608 bufqlim[q].bl_num--;
4609 return;
4610 }
4611
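/*
 * bufq_balance_thread_init:
 * Initialize the global and per-queue limits (on the first call) and
 * start the "bufqscan" worker thread that periodically rebalances the
 * buffer queues.
 */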
4612 static void
4613 bufq_balance_thread_init(void)
4614 {
4615 thread_t thread = THREAD_NULL;
4616
4617 if (bufqscanwait++ == 0) {
4618
4619 /* Initialize globals */
4620 MAXNBUF = (sane_size / PAGE_SIZE);
4621 nbufh = nbuf_headers;
4622 nbuflow = min(nbufh, 100);
4623 nbufhigh = min(MAXNBUF, max(nbufh, 2048));
4624 nbuftarget = (sane_size >> 5) / PAGE_SIZE;
4625 nbuftarget = max(nbuflow, nbuftarget);
4626 nbuftarget = min(nbufhigh, nbuftarget);
4627
4628 /*
4629 * Initialize the bufqlim
4630 */
4631
4632 /* LOCKED queue */
4633 bufqlim[BQ_LOCKED].bl_nlow = 0;
4634 bufqlim[BQ_LOCKED].bl_nlhigh = 32;
4635 bufqlim[BQ_LOCKED].bl_target = 0;
4636 bufqlim[BQ_LOCKED].bl_stale = 30;
4637
4638 /* LRU queue */
4639 bufqlim[BQ_LRU].bl_nlow = 0;
4640 bufqlim[BQ_LRU].bl_nlhigh = nbufhigh/4;
4641 bufqlim[BQ_LRU].bl_target = nbuftarget/4;
4642 bufqlim[BQ_LRU].bl_stale = LRU_IS_STALE;
4643
4644 /* AGE queue */
4645 bufqlim[BQ_AGE].bl_nlow = 0;
4646 bufqlim[BQ_AGE].bl_nlhigh = nbufhigh/4;
4647 bufqlim[BQ_AGE].bl_target = nbuftarget/4;
4648 bufqlim[BQ_AGE].bl_stale = AGE_IS_STALE;
4649
4650 /* EMPTY queue */
4651 bufqlim[BQ_EMPTY].bl_nlow = 0;
4652 bufqlim[BQ_EMPTY].bl_nlhigh = nbufhigh/4;
4653 bufqlim[BQ_EMPTY].bl_target = nbuftarget/4;
4654 bufqlim[BQ_EMPTY].bl_stale = 600000;
4655
4656 /* META queue */
4657 bufqlim[BQ_META].bl_nlow = 0;
4658 bufqlim[BQ_META].bl_nlhigh = nbufhigh/4;
4659 bufqlim[BQ_META].bl_target = nbuftarget/4;
4660 bufqlim[BQ_META].bl_stale = META_IS_STALE;
4661
4662 /* LAUNDRY queue */
4663 bufqlim[BQ_LAUNDRY].bl_nlow = 0;
4664 bufqlim[BQ_LAUNDRY].bl_nlhigh = 32;
4665 bufqlim[BQ_LAUNDRY].bl_target = 0;
4666 bufqlim[BQ_LAUNDRY].bl_stale = 30;
4667
4668 buqlimprt(1);
4669 }
4670
4671 /* create worker thread */
4672 kernel_thread_start((thread_continue_t)bufqscan_thread, NULL, &thread);
4673 thread_deallocate(thread);
4674 }
4675
4676 /* The workloop for the buffer balancing thread */
4677 static void
4678 bufqscan_thread()
4679 {
4680 int moretodo = 0;
4681
4682 for(;;) {
4683 do {
4684 int q; /* buffer queue to process */
4685
4686 q = initbufqscan();
4687 for (; q; ) {
4688 moretodo |= balancebufq(q);
4689 q = nextbufq(q);
4690 }
4691 } while (moretodo);
4692
4693 #if DIAGNOSTIC
4694 vfs_bufstats();
4695 buqlimprt(0);
4696 #endif
4697 (void)tsleep((void *)&bufqscanwait, PRIBIO, "bufqscanwait", 60 * hz);
4698 moretodo = 0;
4699 }
4700 }
4701
4702 /* Seed for the buffer queue balancing */
4703 static __inline__ int
4704 initbufqscan()
4705 {
4706 /* Start with AGE queue */
4707 return (BQ_AGE);
4708 }
4709
4710 /* Pick next buffer queue to balance */
4711 static __inline__ int
4712 nextbufq(int q)
4713 {
4714 /* follow the documented scan order: AGE, LRU, META, EMPTY, then stop */
4715 if (q == BQ_AGE) return (BQ_LRU);
4716 if (q == BQ_LRU) return (BQ_META);
4717 if (q == BQ_META) return (BQ_EMPTY);
4718 return (0);
4719 }
4720
4721 /* function to balance the buffer queues */
4722 static int
4723 balancebufq(int q)
4724 {
4725 int moretodo = 0;
4726 int n, t;
4727
4728 /* reject invalid q */
4729 if ((q < 0) || (q >= BQUEUES))
4730 goto out;
4731
4732 /* LOCKED or LAUNDRY queue MUST not be balanced */
4733 if ((q == BQ_LOCKED) || (q == BQ_LAUNDRY))
4734 goto out;
4735
4736 n = (bufqlim[q].bl_num - bufqlim[q].bl_target);
4737
4738 /* If queue has less than target nothing more to do */
4739 if (n < 0)
4740 goto out;
4741
4742 if ( n > 8 ) {
4743 /* Balance only a small amount (12.5%) at a time */
4744 n >>= 3;
4745 }
4746
4747 /* EMPTY queue needs special handling */
4748 if (q == BQ_EMPTY) {
4749 moretodo |= btrimempty(n);
4750 goto out;
4751 }
4752
4753 t = buf_timestamp();
4754
4755 for (; n > 0; n--) {
4756 struct buf *bp = bufqueues[q].tqh_first;
4757 if (!bp)
4758 break;
4759
4760 /* check if it's stale */
4761 if ((t - bp->b_timestamp) > bufqlim[q].bl_stale) {
4762 if (bcleanbuf(bp, FALSE)) {
4763 /* buf_bawrite() issued, bp not ready */
4764 moretodo = 1;
4765 } else {
4766 /* release the cleaned buffer to BQ_EMPTY */
4767 SET(bp->b_flags, B_INVAL);
4768 buf_brelse(bp);
4769 }
4770 } else
4771 break;
4772 }
4773
4774 out:
4775 return (moretodo);
4776 }
4777
4778 static int
4779 btrimempty(int n)
4780 {
4781 /*
4782 * When struct buf are allocated dynamically, this would
4783 * reclaim up to 'n' struct buf from the empty queue.
4784 */
4785
4786 return (0);
4787 }
4788
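/*
 * buqlimprt:
 * Print the per-queue limits: with 'all' set, the min, current, max,
 * target and stale times for each queue; otherwise just the current
 * counts.
 */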
4789 static void
4790 buqlimprt(int all)
4791 {
4792 int i;
4793 static char *bname[BQUEUES] =
4794 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
4795
4796 if (all)
4797 for (i = 0; i < BQUEUES; i++) {
4798 printf("%s : ", bname[i]);
4799 printf("min = %ld, ", (long)bufqlim[i].bl_nlow);
4800 printf("cur = %ld, ", (long)bufqlim[i].bl_num);
4801 printf("max = %ld, ", (long)bufqlim[i].bl_nlhigh);
4802 printf("target = %ld, ", (long)bufqlim[i].bl_target);
4803 printf("stale after %ld seconds\n", bufqlim[i].bl_stale);
4804 }
4805 else
4806 for (i = 0; i < BQUEUES; i++) {
4807 printf("%s : ", bname[i]);
4808 printf("cur = %ld, ", (long)bufqlim[i].bl_num);
4809 }
4810 }
4811
4812 #endif
4813
4814