1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1994 Christopher G. Demetriou
31 * Copyright (c) 1982, 1986, 1989, 1993
32 * The Regents of the University of California. All rights reserved.
33 * (c) UNIX System Laboratories, Inc.
34 * All or some portions of this file are derived from material licensed
35 * to the University of California by American Telephone and Telegraph
36 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
37 * the permission of UNIX System Laboratories, Inc.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Berkeley and its contributors.
51 * 4. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE.
66 *
67 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
68 */
69
70 /*
71 * Some references:
72 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
73 * Leffler, et al.: The Design and Implementation of the 4.3BSD
74 * UNIX Operating System (Addison-Wesley, 1989)
75 */
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc_internal.h>
80 #include <sys/buf_internal.h>
81 #include <sys/vnode_internal.h>
82 #include <sys/mount_internal.h>
83 #include <sys/trace.h>
84 #include <sys/malloc.h>
85 #include <sys/resourcevar.h>
86 #include <miscfs/specfs/specdev.h>
87 #include <sys/ubc.h>
88 #include <sys/kauth.h>
89 #if DIAGNOSTIC
90 #include <kern/assert.h>
91 #endif /* DIAGNOSTIC */
92 #include <kern/task.h>
93 #include <kern/zalloc.h>
94 #include <kern/locks.h>
95 #include <kern/thread.h>
96
97 #include <sys/fslog.h> /* fslog_io_error() */
98 #include <sys/disk.h> /* dk_error_description_t */
99
100 #include <mach/mach_types.h>
101 #include <mach/memory_object_types.h>
102 #include <kern/sched_prim.h> /* thread_block() */
103
104 #include <vm/vm_kern.h>
105 #include <vm/vm_pageout.h>
106
107 #include <sys/kdebug.h>
108
109 #include <libkern/OSAtomic.h>
110 #include <libkern/OSDebug.h>
111 #include <sys/ubc_internal.h>
112
113 #include <sys/sdt.h>
114
115 int bcleanbuf(buf_t bp, boolean_t discard);
116 static int brecover_data(buf_t bp);
117 static boolean_t incore(vnode_t vp, daddr64_t blkno);
118 /* timeout is in msecs */
119 static buf_t getnewbuf(int slpflag, int slptimeo, int *queue);
120 static void bremfree_locked(buf_t bp);
121 static void buf_reassign(buf_t bp, vnode_t newvp);
122 static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
123 static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
124 static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);
125 static boolean_t buffer_cache_gc(int);
126 static buf_t buf_brelse_shadow(buf_t bp);
127 static void buf_free_meta_store(buf_t bp);
128
129 static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
130 uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);
131
132
133 int bdwrite_internal(buf_t, int);
134
135 extern void disk_conditioner_delay(buf_t, int, int, uint64_t);
136
137 /* zone allocated buffer headers */
138 static void bufzoneinit(void);
139 static void bcleanbuf_thread_init(void);
140 static void bcleanbuf_thread(void);
141
142 static zone_t buf_hdr_zone;
143 static int buf_hdr_count;
144
145
146 /*
147 * Definitions for the buffer hash lists.
148 */
149 #define BUFHASH(dvp, lbn) \
150 (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
151 LIST_HEAD(bufhashhdr, buf) * bufhashtbl, invalhash;
152 u_long bufhash;
153
154 static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);
155
156 /* Definitions for the buffer stats. */
157 struct bufstats bufstats;
158
159 /* Number of delayed write buffers */
160 long nbdwrite = 0;
161 int blaundrycnt = 0;
162 static int boot_nbuf_headers = 0;
163
164 static TAILQ_HEAD(delayqueue, buf) delaybufqueue;
165
166 static TAILQ_HEAD(ioqueue, buf) iobufqueue;
167 static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
168 static int needbuffer;
169 static int need_iobuffer;
170
171 static lck_grp_t *buf_mtx_grp;
172 static lck_attr_t *buf_mtx_attr;
173 static lck_grp_attr_t *buf_mtx_grp_attr;
174 static lck_mtx_t *iobuffer_mtxp;
175 static lck_mtx_t *buf_mtxp;
176 static lck_mtx_t *buf_gc_callout;
177
178 static int buf_busycount;
179
180 #define FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE 16
181 typedef struct {
182 void (* callout)(int, void *);
183 void *context;
184 } fs_buffer_cache_gc_callout_t;
185
186 fs_buffer_cache_gc_callout_t fs_callouts[FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE] = { {NULL, NULL} };
187
188 static __inline__ int
189 buf_timestamp(void)
190 {
191 struct timeval t;
192 microuptime(&t);
193 return t.tv_sec;
194 }
195
196 /*
197 * Insq/Remq for the buffer free lists.
198 */
199 #define binsheadfree(bp, dp, whichq) do { \
200 TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
201 } while (0)
202
203 #define binstailfree(bp, dp, whichq) do { \
204 TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
205 } while (0)
206
207 #define BHASHENTCHECK(bp) \
208 if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
209 panic("%p: b_hash.le_prev is not deadbeef", (bp));
210
211 #define BLISTNONE(bp) \
212 (bp)->b_hash.le_next = (struct buf *)0; \
213 (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;
214
215 /*
216 * Insq/Remq for the vnode usage lists.
217 */
218 #define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
219 #define bufremvn(bp) { \
220 LIST_REMOVE(bp, b_vnbufs); \
221 (bp)->b_vnbufs.le_next = NOLIST; \
222 }
223
224 /*
225 * Time in seconds before a buffer on a list is
226 * considered as a stale buffer
227 */
228 #define LRU_IS_STALE 120 /* default value for the LRU */
229 #define AGE_IS_STALE 60 /* default value for the AGE */
230 #define META_IS_STALE 180 /* default value for the BQ_META */
231
232 int lru_is_stale = LRU_IS_STALE;
233 int age_is_stale = AGE_IS_STALE;
234 int meta_is_stale = META_IS_STALE;
235
236 #define MAXLAUNDRY 10
237
238 /* LIST_INSERT_HEAD() with assertions */
239 static __inline__ void
240 blistenterhead(struct bufhashhdr * head, buf_t bp)
241 {
242 if ((bp->b_hash.le_next = (head)->lh_first) != NULL) {
243 (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
244 }
245 (head)->lh_first = bp;
246 bp->b_hash.le_prev = &(head)->lh_first;
247 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
248 panic("blistenterhead: le_prev is deadbeef");
249 }
250 }
251
252 static __inline__ void
253 binshash(buf_t bp, struct bufhashhdr *dp)
254 {
255 #if DIAGNOSTIC
256 buf_t nbp;
257 #endif /* DIAGNOSTIC */
258
259 BHASHENTCHECK(bp);
260
261 #if DIAGNOSTIC
262 nbp = dp->lh_first;
263 for (; nbp != NULL; nbp = nbp->b_hash.le_next) {
264 if (nbp == bp) {
265 panic("buf already in hashlist");
266 }
267 }
268 #endif /* DIAGNOSTIC */
269
270 blistenterhead(dp, bp);
271 }
272
273 static __inline__ void
274 bremhash(buf_t bp)
275 {
276 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
277 panic("bremhash le_prev is deadbeef");
278 }
279 if (bp->b_hash.le_next == bp) {
280 panic("bremhash: next points to self");
281 }
282
283 if (bp->b_hash.le_next != NULL) {
284 bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
285 }
286 *bp->b_hash.le_prev = (bp)->b_hash.le_next;
287 }
288
289 /*
290 * buf_mtxp held.
291 */
292 static __inline__ void
293 bmovelaundry(buf_t bp)
294 {
295 bp->b_whichq = BQ_LAUNDRY;
296 bp->b_timestamp = buf_timestamp();
297 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
298 blaundrycnt++;
299 }
300
301 static __inline__ void
302 buf_release_credentials(buf_t bp)
303 {
304 if (IS_VALID_CRED(bp->b_rcred)) {
305 kauth_cred_unref(&bp->b_rcred);
306 }
307 if (IS_VALID_CRED(bp->b_wcred)) {
308 kauth_cred_unref(&bp->b_wcred);
309 }
310 }
311
312
313 int
314 buf_valid(buf_t bp)
315 {
316 if ((bp->b_flags & (B_DONE | B_DELWRI))) {
317 return 1;
318 }
319 return 0;
320 }
321
322 int
323 buf_fromcache(buf_t bp)
324 {
325 if ((bp->b_flags & B_CACHE)) {
326 return 1;
327 }
328 return 0;
329 }
330
331 void
332 buf_markinvalid(buf_t bp)
333 {
334 SET(bp->b_flags, B_INVAL);
335 }
336
337 void
338 buf_markdelayed(buf_t bp)
339 {
340 if (!ISSET(bp->b_flags, B_DELWRI)) {
341 SET(bp->b_flags, B_DELWRI);
342
343 OSAddAtomicLong(1, &nbdwrite);
344 buf_reassign(bp, bp->b_vp);
345 }
346 SET(bp->b_flags, B_DONE);
347 }
348
349 void
350 buf_markclean(buf_t bp)
351 {
352 if (ISSET(bp->b_flags, B_DELWRI)) {
353 CLR(bp->b_flags, B_DELWRI);
354
355 OSAddAtomicLong(-1, &nbdwrite);
356 buf_reassign(bp, bp->b_vp);
357 }
358 }
359
360 void
361 buf_markeintr(buf_t bp)
362 {
363 SET(bp->b_flags, B_EINTR);
364 }
365
366
367 void
368 buf_markaged(buf_t bp)
369 {
370 SET(bp->b_flags, B_AGE);
371 }
372
373 int
374 buf_fua(buf_t bp)
375 {
376 if ((bp->b_flags & B_FUA) == B_FUA) {
377 return 1;
378 }
379 return 0;
380 }
381
382 void
383 buf_markfua(buf_t bp)
384 {
385 SET(bp->b_flags, B_FUA);
386 }
387
388 #if CONFIG_PROTECT
389 cpx_t
390 bufattr_cpx(bufattr_t bap)
391 {
392 return bap->ba_cpx;
393 }
394
395 void
396 bufattr_setcpx(bufattr_t bap, cpx_t cpx)
397 {
398 bap->ba_cpx = cpx;
399 }
400
401 void
402 buf_setcpoff(buf_t bp, uint64_t foffset)
403 {
404 bp->b_attr.ba_cp_file_off = foffset;
405 }
406
407 uint64_t
408 bufattr_cpoff(bufattr_t bap)
409 {
410 return bap->ba_cp_file_off;
411 }
412
413 void
414 bufattr_setcpoff(bufattr_t bap, uint64_t foffset)
415 {
416 bap->ba_cp_file_off = foffset;
417 }
418
419 #else // !CONFIG_PROTECT
420
421 uint64_t
422 bufattr_cpoff(bufattr_t bap __unused)
423 {
424 return 0;
425 }
426
427 void
428 bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset)
429 {
430 return;
431 }
432
433 struct cpx *
434 bufattr_cpx(__unused bufattr_t bap)
435 {
436 return NULL;
437 }
438
439 void
440 bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx)
441 {
442 }
443
444 #endif /* !CONFIG_PROTECT */
445
446 bufattr_t
447 bufattr_alloc()
448 {
449 bufattr_t bap;
450 MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
451 if (bap == NULL) {
452 return NULL;
453 }
454
455 bzero(bap, sizeof(struct bufattr));
456 return bap;
457 }
458
459 void
460 bufattr_free(bufattr_t bap)
461 {
462 if (bap) {
463 FREE(bap, M_TEMP);
464 }
465 }
466
467 bufattr_t
468 bufattr_dup(bufattr_t bap)
469 {
470 bufattr_t new_bufattr;
471 MALLOC(new_bufattr, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK);
472 if (new_bufattr == NULL) {
473 return NULL;
474 }
475
476 /* Copy the provided one into the new copy */
477 memcpy(new_bufattr, bap, sizeof(struct bufattr));
478 return new_bufattr;
479 }
480
481 int
482 bufattr_rawencrypted(bufattr_t bap)
483 {
484 if ((bap->ba_flags & BA_RAW_ENCRYPTED_IO)) {
485 return 1;
486 }
487 return 0;
488 }
489
490 int
491 bufattr_throttled(bufattr_t bap)
492 {
493 return GET_BUFATTR_IO_TIER(bap);
494 }
495
496 int
497 bufattr_passive(bufattr_t bap)
498 {
499 if ((bap->ba_flags & BA_PASSIVE)) {
500 return 1;
501 }
502 return 0;
503 }
504
505 int
506 bufattr_nocache(bufattr_t bap)
507 {
508 if ((bap->ba_flags & BA_NOCACHE)) {
509 return 1;
510 }
511 return 0;
512 }
513
514 int
515 bufattr_meta(bufattr_t bap)
516 {
517 if ((bap->ba_flags & BA_META)) {
518 return 1;
519 }
520 return 0;
521 }
522
523 void
524 bufattr_markmeta(bufattr_t bap)
525 {
526 SET(bap->ba_flags, BA_META);
527 }
528
529 int
530 #if !CONFIG_EMBEDDED
531 bufattr_delayidlesleep(bufattr_t bap)
532 #else /* !CONFIG_EMBEDDED */
533 bufattr_delayidlesleep(__unused bufattr_t bap)
534 #endif /* !CONFIG_EMBEDDED */
535 {
536 #if !CONFIG_EMBEDDED
537 if ((bap->ba_flags & BA_DELAYIDLESLEEP)) {
538 return 1;
539 }
540 #endif /* !CONFIG_EMBEDDED */
541 return 0;
542 }
543
544 bufattr_t
545 buf_attr(buf_t bp)
546 {
547 return &bp->b_attr;
548 }
549
550 void
551 buf_markstatic(buf_t bp __unused)
552 {
553 SET(bp->b_flags, B_STATICCONTENT);
554 }
555
556 int
557 buf_static(buf_t bp)
558 {
559 if ((bp->b_flags & B_STATICCONTENT)) {
560 return 1;
561 }
562 return 0;
563 }
564
565 void
566 bufattr_markgreedymode(bufattr_t bap)
567 {
568 SET(bap->ba_flags, BA_GREEDY_MODE);
569 }
570
571 int
572 bufattr_greedymode(bufattr_t bap)
573 {
574 if ((bap->ba_flags & BA_GREEDY_MODE)) {
575 return 1;
576 }
577 return 0;
578 }
579
580 void
581 bufattr_markisochronous(bufattr_t bap)
582 {
583 SET(bap->ba_flags, BA_ISOCHRONOUS);
584 }
585
586 int
587 bufattr_isochronous(bufattr_t bap)
588 {
589 if ((bap->ba_flags & BA_ISOCHRONOUS)) {
590 return 1;
591 }
592 return 0;
593 }
594
595 void
596 bufattr_markquickcomplete(bufattr_t bap)
597 {
598 SET(bap->ba_flags, BA_QUICK_COMPLETE);
599 }
600
601 int
602 bufattr_quickcomplete(bufattr_t bap)
603 {
604 if ((bap->ba_flags & BA_QUICK_COMPLETE)) {
605 return 1;
606 }
607 return 0;
608 }
609
610 void
611 bufattr_markioscheduled(bufattr_t bap)
612 {
613 SET(bap->ba_flags, BA_IO_SCHEDULED);
614 }
615
616
617 int
618 bufattr_ioscheduled(bufattr_t bap)
619 {
620 if ((bap->ba_flags & BA_IO_SCHEDULED)) {
621 return 1;
622 }
623 return 0;
624 }
625
626 errno_t
627 buf_error(buf_t bp)
628 {
629 return bp->b_error;
630 }
631
632 void
633 buf_seterror(buf_t bp, errno_t error)
634 {
635 if ((bp->b_error = error)) {
636 SET(bp->b_flags, B_ERROR);
637 } else {
638 CLR(bp->b_flags, B_ERROR);
639 }
640 }
641
642 void
643 buf_setflags(buf_t bp, int32_t flags)
644 {
645 SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
646 }
647
648 void
649 buf_clearflags(buf_t bp, int32_t flags)
650 {
651 CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
652 }
653
654 int32_t
655 buf_flags(buf_t bp)
656 {
657 return bp->b_flags & BUF_X_RDFLAGS;
658 }
659
660 void
661 buf_reset(buf_t bp, int32_t io_flags)
662 {
663 CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
664 SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));
665
666 bp->b_error = 0;
667 }
668
669 uint32_t
670 buf_count(buf_t bp)
671 {
672 return bp->b_bcount;
673 }
674
675 void
676 buf_setcount(buf_t bp, uint32_t bcount)
677 {
678 bp->b_bcount = bcount;
679 }
680
681 uint32_t
682 buf_size(buf_t bp)
683 {
684 return bp->b_bufsize;
685 }
686
687 void
688 buf_setsize(buf_t bp, uint32_t bufsize)
689 {
690 bp->b_bufsize = bufsize;
691 }
692
693 uint32_t
694 buf_resid(buf_t bp)
695 {
696 return bp->b_resid;
697 }
698
699 void
700 buf_setresid(buf_t bp, uint32_t resid)
701 {
702 bp->b_resid = resid;
703 }
704
705 uint32_t
706 buf_dirtyoff(buf_t bp)
707 {
708 return bp->b_dirtyoff;
709 }
710
711 uint32_t
712 buf_dirtyend(buf_t bp)
713 {
714 return bp->b_dirtyend;
715 }
716
717 void
718 buf_setdirtyoff(buf_t bp, uint32_t dirtyoff)
719 {
720 bp->b_dirtyoff = dirtyoff;
721 }
722
723 void
724 buf_setdirtyend(buf_t bp, uint32_t dirtyend)
725 {
726 bp->b_dirtyend = dirtyend;
727 }
728
729 uintptr_t
730 buf_dataptr(buf_t bp)
731 {
732 return bp->b_datap;
733 }
734
735 void
736 buf_setdataptr(buf_t bp, uintptr_t data)
737 {
738 bp->b_datap = data;
739 }
740
741 vnode_t
742 buf_vnode(buf_t bp)
743 {
744 return bp->b_vp;
745 }
746
747 void
748 buf_setvnode(buf_t bp, vnode_t vp)
749 {
750 bp->b_vp = vp;
751 }
752
753
754 void *
755 buf_callback(buf_t bp)
756 {
757 if (!(bp->b_flags & B_CALL)) {
758 return (void *) NULL;
759 }
760
761 return (void *)bp->b_iodone;
762 }
763
764
765 errno_t
766 buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
767 {
768 assert(!ISSET(bp->b_flags, B_FILTER) && ISSET(bp->b_lflags, BL_BUSY));
769
770 if (callback) {
771 bp->b_flags |= (B_CALL | B_ASYNC);
772 } else {
773 bp->b_flags &= ~B_CALL;
774 }
775 bp->b_transaction = transaction;
776 bp->b_iodone = callback;
777
778 return 0;
779 }
780
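/*
 * Illustrative sketch, not part of the original file: a caller-supplied
 * iodone routine of the shape buf_setcallback() expects.  The structure
 * and function names here are hypothetical; only buf_setcallback(),
 * buf_error() and buf_brelse() are KPIs used elsewhere in this file.
 */
#if 0   /* example only */
struct example_txn {
	int     txn_id;         /* hypothetical per-transaction context */
};

static void
example_iodone(buf_t bp, void *arg)
{
	struct example_txn *txn = (struct example_txn *)arg;

	if (buf_error(bp)) {
		printf("example_iodone: txn %d failed (%d)\n",
		    txn->txn_id, buf_error(bp));
	}
	/*
	 * with B_CALL set, the completion path hands the buffer to this
	 * routine; the callback (or code it notifies) is expected to
	 * release it.
	 */
	buf_brelse(bp);
}

/*
 * usage, with 'bp' held busy (buf_setcallback() asserts BL_BUSY):
 *
 *	buf_setcallback(bp, example_iodone, &txn);
 *	VNOP_STRATEGY(bp);
 */
#endif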
781 errno_t
782 buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
783 {
784 if (!(bp->b_lflags & BL_IOBUF)) {
785 return EINVAL;
786 }
787
788 if (upl) {
789 bp->b_flags |= B_CLUSTER;
790 } else {
791 bp->b_flags &= ~B_CLUSTER;
792 }
793 bp->b_upl = upl;
794 bp->b_uploffset = offset;
795
796 return 0;
797 }
798
799 buf_t
800 buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
801 {
802 buf_t io_bp;
803
804 if (io_offset < 0 || io_size < 0) {
805 return NULL;
806 }
807
808 if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) {
809 return NULL;
810 }
811
812 if (bp->b_flags & B_CLUSTER) {
813 if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) {
814 return NULL;
815 }
816
817 if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount)) {
818 return NULL;
819 }
820 }
821 io_bp = alloc_io_buf(bp->b_vp, 0);
822
823 io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);
824
825 if (iodone) {
826 io_bp->b_transaction = arg;
827 io_bp->b_iodone = iodone;
828 io_bp->b_flags |= B_CALL;
829 }
830 if (bp->b_flags & B_CLUSTER) {
831 io_bp->b_upl = bp->b_upl;
832 io_bp->b_uploffset = bp->b_uploffset + io_offset;
833 } else {
834 io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
835 }
836 io_bp->b_bcount = io_size;
837
838 return io_bp;
839 }
840
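/*
 * Illustrative sketch, not part of the original file: carving a
 * sub-range out of an I/O buffer with buf_clone().  Offsets are byte
 * offsets within bp's b_bcount; for B_CLUSTER buffers the sub-range
 * must respect the page-alignment rules checked above.  The function
 * name is hypothetical.
 */
#if 0   /* example only */
static void
example_split(buf_t bp)
{
	int     half = (int)buf_count(bp) / 2;
	buf_t   front;

	/* clone the first half; NULL means the range/alignment checks failed */
	front = buf_clone(bp, 0, half, NULL, NULL);
	if (front != NULL) {
		/* 'front' can now be handed to VNOP_STRATEGY() as its own I/O */
	}
}
#endif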
841
842 int
843 buf_shadow(buf_t bp)
844 {
845 if (bp->b_lflags & BL_SHADOW) {
846 return 1;
847 }
848 return 0;
849 }
850
851
852 buf_t
853 buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
854 {
855 return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1);
856 }
857
858 buf_t
859 buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
860 {
861 return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0);
862 }
863
864
865 static buf_t
866 buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
867 {
868 buf_t io_bp;
869
870 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);
871
872 if (!(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {
873 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
874 return NULL;
875 }
876 #ifdef BUF_MAKE_PRIVATE
877 if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) {
878 panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
879 }
880 #endif
881 io_bp = alloc_io_buf(bp->b_vp, priv);
882
883 io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA);
884 io_bp->b_blkno = bp->b_blkno;
885 io_bp->b_lblkno = bp->b_lblkno;
886
887 if (iodone) {
888 io_bp->b_transaction = arg;
889 io_bp->b_iodone = iodone;
890 io_bp->b_flags |= B_CALL;
891 }
892 if (force_copy == FALSE) {
893 io_bp->b_bcount = bp->b_bcount;
894 io_bp->b_bufsize = bp->b_bufsize;
895
896 if (external_storage) {
897 io_bp->b_datap = external_storage;
898 #ifdef BUF_MAKE_PRIVATE
899 io_bp->b_data_store = NULL;
900 #endif
901 } else {
902 io_bp->b_datap = bp->b_datap;
903 #ifdef BUF_MAKE_PRIVATE
904 io_bp->b_data_store = bp;
905 #endif
906 }
907 *(buf_t *)(&io_bp->b_orig) = bp;
908
909 lck_mtx_lock_spin(buf_mtxp);
910
911 io_bp->b_lflags |= BL_SHADOW;
912 io_bp->b_shadow = bp->b_shadow;
913 bp->b_shadow = io_bp;
914 bp->b_shadow_ref++;
915
916 #ifdef BUF_MAKE_PRIVATE
917 if (external_storage) {
918 io_bp->b_lflags |= BL_EXTERNAL;
919 } else {
920 bp->b_data_ref++;
921 }
922 #endif
923 lck_mtx_unlock(buf_mtxp);
924 } else {
925 if (external_storage) {
926 #ifdef BUF_MAKE_PRIVATE
927 io_bp->b_lflags |= BL_EXTERNAL;
928 #endif
929 io_bp->b_bcount = bp->b_bcount;
930 io_bp->b_bufsize = bp->b_bufsize;
931 io_bp->b_datap = external_storage;
932 } else {
933 allocbuf(io_bp, bp->b_bcount);
934
935 io_bp->b_lflags |= BL_IOBUF_ALLOC;
936 }
937 bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount);
938
939 #ifdef BUF_MAKE_PRIVATE
940 io_bp->b_data_store = NULL;
941 #endif
942 }
943 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);
944
945 return io_bp;
946 }
947
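/*
 * Illustrative sketch, not part of the original file: creating a shadow
 * of a metadata buffer so its current contents can be written (for
 * example by a journal) while the original continues to be modified.
 * Only B_META buffers that are not iobufs qualify, as the checks above
 * show.  'journal_done' and the wrapper name are hypothetical.
 */
#if 0   /* example only */
static void
example_shadow_write(buf_t meta_bp, void (*journal_done)(buf_t, void *), void *arg)
{
	buf_t shadow;

	/* force_copy == FALSE: share meta_bp's data until a private copy is needed */
	shadow = buf_create_shadow(meta_bp, FALSE, 0, journal_done, arg);
	if (shadow == NULL) {
		return;         /* not a B_META buffer, or it is an iobuf */
	}
	/* 'shadow' can now be written via VNOP_STRATEGY() or VNOP_BWRITE() */
}
#endif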
948
949 #ifdef BUF_MAKE_PRIVATE
950 errno_t
951 buf_make_private(buf_t bp)
952 {
953 buf_t ds_bp;
954 buf_t t_bp;
955 struct buf my_buf;
956
957 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);
958
959 if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {
960 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
961 return EINVAL;
962 }
963 my_buf.b_flags = B_META;
964 my_buf.b_datap = (uintptr_t)NULL;
965 allocbuf(&my_buf, bp->b_bcount);
966
967 bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount);
968
969 lck_mtx_lock_spin(buf_mtxp);
970
971 for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
972 if (!ISSET(bp->b_lflags, BL_EXTERNAL)) {
973 break;
974 }
975 }
976 ds_bp = t_bp;
977
978 if (ds_bp == NULL && bp->b_data_ref) {
979 panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL");
980 }
981
982 if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) {
983 panic("buf_make_private: ref_count == 0 && ds_bp != NULL");
984 }
985
986 if (ds_bp == NULL) {
987 lck_mtx_unlock(buf_mtxp);
988
989 buf_free_meta_store(&my_buf);
990
991 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
992 return EINVAL;
993 }
994 for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
995 if (!ISSET(t_bp->b_lflags, BL_EXTERNAL)) {
996 t_bp->b_data_store = ds_bp;
997 }
998 }
999 ds_bp->b_data_ref = bp->b_data_ref;
1000
1001 bp->b_data_ref = 0;
1002 bp->b_datap = my_buf.b_datap;
1003
1004 lck_mtx_unlock(buf_mtxp);
1005
1006 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
1007 return 0;
1008 }
1009 #endif
1010
1011
1012 void
1013 buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
1014 void(**old_iodone)(buf_t, void *), void **old_transaction)
1015 {
1016 assert(ISSET(bp->b_lflags, BL_BUSY));
1017
1018 if (old_iodone) {
1019 *old_iodone = bp->b_iodone;
1020 }
1021 if (old_transaction) {
1022 *old_transaction = bp->b_transaction;
1023 }
1024
1025 bp->b_transaction = transaction;
1026 bp->b_iodone = filter;
1027 if (filter) {
1028 bp->b_flags |= B_FILTER;
1029 } else {
1030 bp->b_flags &= ~B_FILTER;
1031 }
1032 }
1033
1034
1035 daddr64_t
1036 buf_blkno(buf_t bp)
1037 {
1038 return bp->b_blkno;
1039 }
1040
1041 daddr64_t
1042 buf_lblkno(buf_t bp)
1043 {
1044 return bp->b_lblkno;
1045 }
1046
1047 void
1048 buf_setblkno(buf_t bp, daddr64_t blkno)
1049 {
1050 bp->b_blkno = blkno;
1051 }
1052
1053 void
1054 buf_setlblkno(buf_t bp, daddr64_t lblkno)
1055 {
1056 bp->b_lblkno = lblkno;
1057 }
1058
1059 dev_t
1060 buf_device(buf_t bp)
1061 {
1062 return bp->b_dev;
1063 }
1064
1065 errno_t
1066 buf_setdevice(buf_t bp, vnode_t vp)
1067 {
1068 if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) {
1069 return EINVAL;
1070 }
1071 bp->b_dev = vp->v_rdev;
1072
1073 return 0;
1074 }
1075
1076
1077 void *
1078 buf_drvdata(buf_t bp)
1079 {
1080 return bp->b_drvdata;
1081 }
1082
1083 void
1084 buf_setdrvdata(buf_t bp, void *drvdata)
1085 {
1086 bp->b_drvdata = drvdata;
1087 }
1088
1089 void *
1090 buf_fsprivate(buf_t bp)
1091 {
1092 return bp->b_fsprivate;
1093 }
1094
1095 void
1096 buf_setfsprivate(buf_t bp, void *fsprivate)
1097 {
1098 bp->b_fsprivate = fsprivate;
1099 }
1100
1101 kauth_cred_t
1102 buf_rcred(buf_t bp)
1103 {
1104 return bp->b_rcred;
1105 }
1106
1107 kauth_cred_t
1108 buf_wcred(buf_t bp)
1109 {
1110 return bp->b_wcred;
1111 }
1112
1113 void *
1114 buf_upl(buf_t bp)
1115 {
1116 return bp->b_upl;
1117 }
1118
1119 uint32_t
1120 buf_uploffset(buf_t bp)
1121 {
1122 return (uint32_t)(bp->b_uploffset);
1123 }
1124
1125 proc_t
1126 buf_proc(buf_t bp)
1127 {
1128 return bp->b_proc;
1129 }
1130
1131
1132 errno_t
1133 buf_map(buf_t bp, caddr_t *io_addr)
1134 {
1135 buf_t real_bp;
1136 vm_offset_t vaddr;
1137 kern_return_t kret;
1138
1139 if (!(bp->b_flags & B_CLUSTER)) {
1140 *io_addr = (caddr_t)bp->b_datap;
1141 return 0;
1142 }
1143 real_bp = (buf_t)(bp->b_real_bp);
1144
1145 if (real_bp && real_bp->b_datap) {
1146 /*
1147 * b_real_bp is only valid if B_CLUSTER is SET
1148 * if it's non-zero, then someone did a cluster_bp call
1149 * if the backing physical pages were already mapped
1150 * in before the call to cluster_bp (non-zero b_datap),
1151 * then we just use that mapping
1152 */
1153 *io_addr = (caddr_t)real_bp->b_datap;
1154 return 0;
1155 }
1156 kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */
1157
1158 if (kret != KERN_SUCCESS) {
1159 *io_addr = NULL;
1160
1161 return ENOMEM;
1162 }
1163 vaddr += bp->b_uploffset;
1164
1165 *io_addr = (caddr_t)vaddr;
1166
1167 return 0;
1168 }
1169
1170 errno_t
1171 buf_unmap(buf_t bp)
1172 {
1173 buf_t real_bp;
1174 kern_return_t kret;
1175
1176 if (!(bp->b_flags & B_CLUSTER)) {
1177 return 0;
1178 }
1179 /*
1180 * see buf_map for the explanation
1181 */
1182 real_bp = (buf_t)(bp->b_real_bp);
1183
1184 if (real_bp && real_bp->b_datap) {
1185 return 0;
1186 }
1187
1188 if ((bp->b_lflags & BL_IOBUF) &&
1189 ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
1190 /*
1191 * ignore pageins... the 'right' thing will
1192 * happen due to the way we handle speculative
1193 * clusters...
1194 *
1195 * when we commit these pages, we'll hit
1196 * it with UPL_COMMIT_INACTIVE which
1197 * will clear the reference bit that got
1198 * turned on when we touched the mapping
1199 */
1200 bp->b_flags |= B_AGE;
1201 }
1202 kret = ubc_upl_unmap(bp->b_upl);
1203
1204 if (kret != KERN_SUCCESS) {
1205 return EINVAL;
1206 }
1207 return 0;
1208 }
1209
1210
1211 void
1212 buf_clear(buf_t bp)
1213 {
1214 caddr_t baddr;
1215
1216 if (buf_map(bp, &baddr) == 0) {
1217 bzero(baddr, bp->b_bcount);
1218 buf_unmap(bp);
1219 }
1220 bp->b_resid = 0;
1221 }
1222
1223 /*
1224 * Read or write a buffer that is not contiguous on disk.
1225 * buffer is marked done/error at the conclusion
1226 */
1227 static int
1228 buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
1229 {
1230 vnode_t vp = buf_vnode(bp);
1231 buf_t io_bp; /* For reading or writing a single block */
1232 int io_direction;
1233 int io_resid;
1234 size_t io_contig_bytes;
1235 daddr64_t io_blkno;
1236 int error = 0;
1237 int bmap_flags;
1238
1239 /*
1240 * save our starting point... the bp was already mapped
1241 * in buf_strategy before we got called
1242 * no sense doing it again.
1243 */
1244 io_blkno = bp->b_blkno;
1245 /*
1246 * Make sure we redo this mapping for the next I/O
1247 * i.e. this can never be a 'permanent' mapping
1248 */
1249 bp->b_blkno = bp->b_lblkno;
1250
1251 /*
1252 * Get an io buffer to do the deblocking
1253 */
1254 io_bp = alloc_io_buf(devvp, 0);
1255
1256 io_bp->b_lblkno = bp->b_lblkno;
1257 io_bp->b_datap = bp->b_datap;
1258 io_resid = bp->b_bcount;
1259 io_direction = bp->b_flags & B_READ;
1260 io_contig_bytes = contig_bytes;
1261
1262 if (bp->b_flags & B_READ) {
1263 bmap_flags = VNODE_READ;
1264 } else {
1265 bmap_flags = VNODE_WRITE;
1266 }
1267
1268 for (;;) {
1269 if (io_blkno == -1) {
1270 /*
1271 * this is unexpected, but we'll allow for it
1272 */
1273 bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
1274 } else {
1275 io_bp->b_bcount = io_contig_bytes;
1276 io_bp->b_bufsize = io_contig_bytes;
1277 io_bp->b_resid = io_contig_bytes;
1278 io_bp->b_blkno = io_blkno;
1279
1280 buf_reset(io_bp, io_direction);
1281
1282 /*
1283 * Call the device to do the I/O and wait for it. Make sure the appropriate party is charged for the write
1284 */
1285
1286 if (!ISSET(bp->b_flags, B_READ)) {
1287 OSAddAtomic(1, &devvp->v_numoutput);
1288 }
1289
1290 if ((error = VNOP_STRATEGY(io_bp))) {
1291 break;
1292 }
1293 if ((error = (int)buf_biowait(io_bp))) {
1294 break;
1295 }
1296 if (io_bp->b_resid) {
1297 io_resid -= (io_contig_bytes - io_bp->b_resid);
1298 break;
1299 }
1300 }
1301 if ((io_resid -= io_contig_bytes) == 0) {
1302 break;
1303 }
1304 f_offset += io_contig_bytes;
1305 io_bp->b_datap += io_contig_bytes;
1306
1307 /*
1308 * Map the current position to a physical block number
1309 */
1310 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) {
1311 break;
1312 }
1313 }
1314 buf_free(io_bp);
1315
1316 if (error) {
1317 buf_seterror(bp, error);
1318 }
1319 bp->b_resid = io_resid;
1320 /*
1321 * This I/O is now complete
1322 */
1323 buf_biodone(bp);
1324
1325 return error;
1326 }
1327
1328
1329 /*
1330 * struct vnop_strategy_args {
1331 * struct buf *a_bp;
1332 * } *ap;
1333 */
1334 errno_t
1335 buf_strategy(vnode_t devvp, void *ap)
1336 {
1337 buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp;
1338 vnode_t vp = bp->b_vp;
1339 int bmap_flags;
1340 errno_t error;
1341 #if CONFIG_DTRACE
1342 int dtrace_io_start_flag = 0; /* We only want to trip the io:::start
1343 * probe once, with the true physical
1344 * block in place (b_blkno)
1345 */
1346
1347 #endif
1348
1349 if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) {
1350 panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
1351 }
1352 /*
1353 * associate the physical device with
1354 * with this buf_t even if we don't
1355 * end up issuing the I/O...
1356 */
1357 bp->b_dev = devvp->v_rdev;
1358
1359 if (bp->b_flags & B_READ) {
1360 bmap_flags = VNODE_READ;
1361 } else {
1362 bmap_flags = VNODE_WRITE;
1363 }
1364
1365 if (!(bp->b_flags & B_CLUSTER)) {
1366 if ((bp->b_upl)) {
1367 /*
1368 * we have a UPL associated with this bp
1369 * go through cluster_bp which knows how
1370 * to deal with filesystem block sizes
1371 * that aren't equal to the page size
1372 */
1373 DTRACE_IO1(start, buf_t, bp);
1374 return cluster_bp(bp);
1375 }
1376 if (bp->b_blkno == bp->b_lblkno) {
1377 off_t f_offset;
1378 size_t contig_bytes;
1379
1380 if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
1381 DTRACE_IO1(start, buf_t, bp);
1382 buf_seterror(bp, error);
1383 buf_biodone(bp);
1384
1385 return error;
1386 }
1387
1388 if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
1389 DTRACE_IO1(start, buf_t, bp);
1390 buf_seterror(bp, error);
1391 buf_biodone(bp);
1392
1393 return error;
1394 }
1395
1396 DTRACE_IO1(start, buf_t, bp);
1397 #if CONFIG_DTRACE
1398 dtrace_io_start_flag = 1;
1399 #endif /* CONFIG_DTRACE */
1400
1401 if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
1402 /* Set block number to force biodone later */
1403 bp->b_blkno = -1;
1404 buf_clear(bp);
1405 } else if ((long)contig_bytes < bp->b_bcount) {
1406 return buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes);
1407 }
1408 }
1409
1410 #if CONFIG_DTRACE
1411 if (dtrace_io_start_flag == 0) {
1412 DTRACE_IO1(start, buf_t, bp);
1413 dtrace_io_start_flag = 1;
1414 }
1415 #endif /* CONFIG_DTRACE */
1416
1417 if (bp->b_blkno == -1) {
1418 buf_biodone(bp);
1419 return 0;
1420 }
1421 }
1422
1423 #if CONFIG_DTRACE
1424 if (dtrace_io_start_flag == 0) {
1425 DTRACE_IO1(start, buf_t, bp);
1426 }
1427 #endif /* CONFIG_DTRACE */
1428
1429 #if CONFIG_PROTECT
1430 /* Capture f_offset in the bufattr*/
1431 cpx_t cpx = bufattr_cpx(buf_attr(bp));
1432 if (cpx) {
1433 /* No need to go here for older EAs */
1434 if (cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) {
1435 off_t f_offset;
1436 if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) {
1437 return error;
1438 }
1439
1440 /*
1441 * Attach the file offset to this buffer. The
1442 * bufattr attributes will be passed down the stack
1443 * until they reach the storage driver (whether
1444 * IOFlashStorage, ASP, or IONVMe). The driver
1445 * will retain the offset in a local variable when it
1446 * issues its I/Os to the NAND controller.
1447 *
1448 * Note that LwVM may end up splitting this I/O
1449 * into sub-I/Os if it crosses a chunk boundary. In this
1450 * case, LwVM will update this field when it dispatches
1451 * each I/O to IOFlashStorage. But from our perspective
1452 * we have only issued a single I/O.
1453 *
1454 * In the case of APFS we do not bounce through another
1455 * intermediate layer (such as CoreStorage). APFS will
1456 * issue the I/Os directly to the block device / IOMedia
1457 * via buf_strategy on the specfs node.
1458 */
1459 buf_setcpoff(bp, f_offset);
1460 CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0);
1461 }
1462 }
1463 #endif
1464
1465 /*
1466 * we can issue the I/O because...
1467 * either B_CLUSTER is set which
1468 * means that the I/O is properly set
1469 * up to be a multiple of the page size, or
1470 * we were able to successfully set up the
1471 * physical block mapping
1472 */
1473 error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap);
1474 DTRACE_FSINFO(strategy, vnode_t, vp);
1475 return error;
1476 }
1477
1478
1479
1480 buf_t
1481 buf_alloc(vnode_t vp)
1482 {
1483 return alloc_io_buf(vp, is_vm_privileged());
1484 }
1485
1486 void
1487 buf_free(buf_t bp)
1488 {
1489 free_io_buf(bp);
1490 }
1491
1492
1493 /*
1494 * iterate buffers for the specified vp.
1495 * if BUF_SCAN_DIRTY is set, do the dirty list
1496 * if BUF_SCAN_CLEAN is set, do the clean list
1497 * if neither flag is set, default to BUF_SCAN_DIRTY
1498 * if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages
1499 */
1500
1501 struct buf_iterate_info_t {
1502 int flag;
1503 struct buflists *listhead;
1504 };
1505
1506 void
1507 buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg)
1508 {
1509 buf_t bp;
1510 int retval;
1511 struct buflists local_iterblkhd;
1512 int lock_flags = BAC_NOWAIT | BAC_REMOVE;
1513 int notify_busy = flags & BUF_NOTIFY_BUSY;
1514 struct buf_iterate_info_t list[2];
1515 int num_lists, i;
1516
1517 if (flags & BUF_SKIP_LOCKED) {
1518 lock_flags |= BAC_SKIP_LOCKED;
1519 }
1520 if (flags & BUF_SKIP_NONLOCKED) {
1521 lock_flags |= BAC_SKIP_NONLOCKED;
1522 }
1523
1524 if (!(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) {
1525 flags |= BUF_SCAN_DIRTY;
1526 }
1527
1528 num_lists = 0;
1529
1530 if (flags & BUF_SCAN_DIRTY) {
1531 list[num_lists].flag = VBI_DIRTY;
1532 list[num_lists].listhead = &vp->v_dirtyblkhd;
1533 num_lists++;
1534 }
1535 if (flags & BUF_SCAN_CLEAN) {
1536 list[num_lists].flag = VBI_CLEAN;
1537 list[num_lists].listhead = &vp->v_cleanblkhd;
1538 num_lists++;
1539 }
1540
1541 for (i = 0; i < num_lists; i++) {
1542 lck_mtx_lock(buf_mtxp);
1543
1544 if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) {
1545 lck_mtx_unlock(buf_mtxp);
1546 continue;
1547 }
1548 while (!LIST_EMPTY(&local_iterblkhd)) {
1549 bp = LIST_FIRST(&local_iterblkhd);
1550 LIST_REMOVE(bp, b_vnbufs);
1551 LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs);
1552
1553 if (buf_acquire_locked(bp, lock_flags, 0, 0)) {
1554 if (notify_busy) {
1555 bp = NULL;
1556 } else {
1557 continue;
1558 }
1559 }
1560
1561 lck_mtx_unlock(buf_mtxp);
1562
1563 retval = callout(bp, arg);
1564
1565 switch (retval) {
1566 case BUF_RETURNED:
1567 if (bp) {
1568 buf_brelse(bp);
1569 }
1570 break;
1571 case BUF_CLAIMED:
1572 break;
1573 case BUF_RETURNED_DONE:
1574 if (bp) {
1575 buf_brelse(bp);
1576 }
1577 lck_mtx_lock(buf_mtxp);
1578 goto out;
1579 case BUF_CLAIMED_DONE:
1580 lck_mtx_lock(buf_mtxp);
1581 goto out;
1582 }
1583 lck_mtx_lock(buf_mtxp);
1584 } /* while list has more nodes */
1585 out:
1586 buf_itercomplete(vp, &local_iterblkhd, list[i].flag);
1587 lck_mtx_unlock(buf_mtxp);
1588 } /* for each list */
1589 } /* buf_iterate */
1590
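/*
 * Illustrative sketch, not part of the original file: a buf_iterate()
 * callout.  With BUF_NOTIFY_BUSY, 'bp' may be NULL for a busy buffer,
 * as described above.  Returning BUF_RETURNED hands the buffer back to
 * buf_brelse(); BUF_CLAIMED means the callout kept it.  Names other
 * than the KPIs and flags used in this file are hypothetical.
 */
#if 0   /* example only */
static int
example_count_dirty(buf_t bp, void *arg)
{
	int *count = (int *)arg;

	if (bp == NULL) {
		/* busy buffer reported via BUF_NOTIFY_BUSY; nothing to release */
		return BUF_CLAIMED;
	}
	(*count)++;
	return BUF_RETURNED;    /* let buf_iterate() brelse the buffer */
}

/*
 * usage:
 *	int dirty = 0;
 *	buf_iterate(vp, example_count_dirty, BUF_SCAN_DIRTY | BUF_NOTIFY_BUSY, &dirty);
 */
#endif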
1591
1592 /*
1593 * Flush out and invalidate all buffers associated with a vnode.
1594 */
1595 int
1596 buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
1597 {
1598 buf_t bp;
1599 int aflags;
1600 int error = 0;
1601 int must_rescan = 1;
1602 struct buflists local_iterblkhd;
1603
1604
1605 if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
1606 return 0;
1607 }
1608
1609 lck_mtx_lock(buf_mtxp);
1610
1611 for (;;) {
1612 if (must_rescan == 0) {
1613 /*
1614 * the lists may not be empty, but all that's left at this
1615 * point are metadata or B_LOCKED buffers which are being
1616 * skipped... we know this because we made it through both
1617 * the clean and dirty lists without dropping buf_mtxp...
1618 * each time we drop buf_mtxp we bump "must_rescan"
1619 */
1620 break;
1621 }
1622 if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
1623 break;
1624 }
1625 must_rescan = 0;
1626 /*
1627 * iterate the clean list
1628 */
1629 if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
1630 goto try_dirty_list;
1631 }
1632 while (!LIST_EMPTY(&local_iterblkhd)) {
1633 bp = LIST_FIRST(&local_iterblkhd);
1634
1635 LIST_REMOVE(bp, b_vnbufs);
1636 LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
1637
1638 /*
1639 * some filesystems distinguish meta data blocks with a negative logical block #
1640 */
1641 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
1642 continue;
1643 }
1644
1645 aflags = BAC_REMOVE;
1646
1647 if (!(flags & BUF_INVALIDATE_LOCKED)) {
1648 aflags |= BAC_SKIP_LOCKED;
1649 }
1650
1651 if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
1652 if (error == EDEADLK) {
1653 /*
1654 * this buffer was marked B_LOCKED...
1655 * we didn't drop buf_mtxp, so we
1656 * don't need to rescan
1657 */
1658 continue;
1659 }
1660 if (error == EAGAIN) {
1661 /*
1662 * found a busy buffer... we blocked and
1663 * dropped buf_mtxp, so we're going to
1664 * need to rescan after this pass is completed
1665 */
1666 must_rescan++;
1667 continue;
1668 }
1669 /*
1670 * got some kind of 'real' error out of the msleep
1671 * in buf_acquire_locked, terminate the scan and return the error
1672 */
1673 buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
1674
1675 lck_mtx_unlock(buf_mtxp);
1676 return error;
1677 }
1678 lck_mtx_unlock(buf_mtxp);
1679
1680 if (bp->b_flags & B_LOCKED) {
1681 KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);
1682 }
1683
1684 CLR(bp->b_flags, B_LOCKED);
1685 SET(bp->b_flags, B_INVAL);
1686 buf_brelse(bp);
1687
1688 lck_mtx_lock(buf_mtxp);
1689
1690 /*
1691 * by dropping buf_mtxp, we allow new
1692 * buffers to be added to the vnode list(s)
1693 * we'll have to rescan at least once more
1694 * if the queues aren't empty
1695 */
1696 must_rescan++;
1697 }
1698 buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
1699
1700 try_dirty_list:
1701 /*
1702 * Now iterate on dirty blks
1703 */
1704 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
1705 continue;
1706 }
1707 while (!LIST_EMPTY(&local_iterblkhd)) {
1708 bp = LIST_FIRST(&local_iterblkhd);
1709
1710 LIST_REMOVE(bp, b_vnbufs);
1711 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1712
1713 /*
1714 * some filesystems distinguish meta data blocks with a negative logical block #
1715 */
1716 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
1717 continue;
1718 }
1719
1720 aflags = BAC_REMOVE;
1721
1722 if (!(flags & BUF_INVALIDATE_LOCKED)) {
1723 aflags |= BAC_SKIP_LOCKED;
1724 }
1725
1726 if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
1727 if (error == EDEADLK) {
1728 /*
1729 * this buffer was marked B_LOCKED...
1730 * we didn't drop buf_mtxp, so we
1731 * don't need to rescan
1732 */
1733 continue;
1734 }
1735 if (error == EAGAIN) {
1736 /*
1737 * found a busy buffer... we blocked and
1738 * dropped buf_mtxp, so we're going to
1739 * need to rescan after this pass is completed
1740 */
1741 must_rescan++;
1742 continue;
1743 }
1744 /*
1745 * got some kind of 'real' error out of the msleep
1746 * in buf_acquire_locked, terminate the scan and return the error
1747 */
1748 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1749
1750 lck_mtx_unlock(buf_mtxp);
1751 return error;
1752 }
1753 lck_mtx_unlock(buf_mtxp);
1754
1755 if (bp->b_flags & B_LOCKED) {
1756 KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);
1757 }
1758
1759 CLR(bp->b_flags, B_LOCKED);
1760 SET(bp->b_flags, B_INVAL);
1761
1762 if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) {
1763 (void) VNOP_BWRITE(bp);
1764 } else {
1765 buf_brelse(bp);
1766 }
1767
1768 lck_mtx_lock(buf_mtxp);
1769 /*
1770 * by dropping buf_mtxp, we allow new
1771 * buffers to be added to the vnode list(s)
1772 * we'll have to rescan at least once more
1773 * if the queues aren't empty
1774 */
1775 must_rescan++;
1776 }
1777 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1778 }
1779 lck_mtx_unlock(buf_mtxp);
1780
1781 return 0;
1782 }
1783
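/*
 * Illustrative sketch, not part of the original file: typical use of
 * buf_invalidateblks() when tearing down a vnode, writing dirty blocks
 * first.  The flag is the one used in this file; the calling context is
 * hypothetical.
 */
#if 0   /* example only */
static int
example_vnode_teardown(vnode_t vp)
{
	/* push dirty buffers to disk, then toss everything cached for vp */
	return buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
}
#endif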
1784 void
1785 buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg)
1786 {
1787 (void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg);
1788 return;
1789 }
1790
1791 int
1792 buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg)
1793 {
1794 buf_t bp;
1795 int writes_issued = 0;
1796 errno_t error;
1797 int busy = 0;
1798 struct buflists local_iterblkhd;
1799 int lock_flags = BAC_NOWAIT | BAC_REMOVE;
1800 int any_locked = 0;
1801
1802 if (flags & BUF_SKIP_LOCKED) {
1803 lock_flags |= BAC_SKIP_LOCKED;
1804 }
1805 if (flags & BUF_SKIP_NONLOCKED) {
1806 lock_flags |= BAC_SKIP_NONLOCKED;
1807 }
1808 loop:
1809 lck_mtx_lock(buf_mtxp);
1810
1811 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
1812 while (!LIST_EMPTY(&local_iterblkhd)) {
1813 bp = LIST_FIRST(&local_iterblkhd);
1814 LIST_REMOVE(bp, b_vnbufs);
1815 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1816
1817 if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) {
1818 busy++;
1819 }
1820 if (error) {
1821 /*
1822 * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED,
1823 * we may want to do something differently if a locked or unlocked
1824 * buffer was encountered (depending on the arg specified).
1825 * In this case, we know that one of those two was set, and the
1826 * buf acquisition failed above.
1827 *
1828 * If it failed with EDEADLK, then save state which can be emitted
1829 * later on to the caller. Most callers should not care.
1830 */
1831 if (error == EDEADLK) {
1832 any_locked++;
1833 }
1834 continue;
1835 }
1836 lck_mtx_unlock(buf_mtxp);
1837
1838 bp->b_flags &= ~B_LOCKED;
1839
1840 /*
1841 * Wait for I/O associated with indirect blocks to complete,
1842 * since there is no way to quickly wait for them below.
1843 */
1844 if ((bp->b_vp == vp) || (wait == 0)) {
1845 (void) buf_bawrite(bp);
1846 } else {
1847 (void) VNOP_BWRITE(bp);
1848 }
1849 writes_issued++;
1850
1851 lck_mtx_lock(buf_mtxp);
1852 }
1853 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1854 }
1855 lck_mtx_unlock(buf_mtxp);
1856
1857 if (wait) {
1858 (void)vnode_waitforwrites(vp, 0, 0, 0, msg);
1859
1860 if (vp->v_dirtyblkhd.lh_first && busy) {
1861 /*
1862 * we had one or more BUSY buffers on
1863 * the dirtyblock list... most likely
1864 * these are due to delayed writes that
1865 * were moved to the bclean queue but
1866 * have not yet been 'written'.
1867 * if we issued some writes on the
1868 * previous pass, we try again immediately
1869 * if we didn't, we'll sleep for some time
1870 * to allow the state to change...
1871 */
1872 if (writes_issued == 0) {
1873 (void)tsleep((caddr_t)&vp->v_numoutput,
1874 PRIBIO + 1, "vnode_flushdirtyblks", hz / 20);
1875 }
1876 writes_issued = 0;
1877 busy = 0;
1878
1879 goto loop;
1880 }
1881 }
1882
1883 return any_locked;
1884 }
1885
1886
1887 /*
1888 * called with buf_mtxp held...
1889 * this lock protects the queue manipulation
1890 */
1891 static int
1892 buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags)
1893 {
1894 struct buflists * listheadp;
1895
1896 if (flags & VBI_DIRTY) {
1897 listheadp = &vp->v_dirtyblkhd;
1898 } else {
1899 listheadp = &vp->v_cleanblkhd;
1900 }
1901
1902 while (vp->v_iterblkflags & VBI_ITER) {
1903 vp->v_iterblkflags |= VBI_ITERWANT;
1904 msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL);
1905 }
1906 if (LIST_EMPTY(listheadp)) {
1907 LIST_INIT(iterheadp);
1908 return EINVAL;
1909 }
1910 vp->v_iterblkflags |= VBI_ITER;
1911
1912 iterheadp->lh_first = listheadp->lh_first;
1913 listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
1914 LIST_INIT(listheadp);
1915
1916 return 0;
1917 }
1918
1919 /*
1920 * called with buf_mtxp held...
1921 * this lock protects the queue manipulation
1922 */
1923 static void
1924 buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags)
1925 {
1926 struct buflists * listheadp;
1927 buf_t bp;
1928
1929 if (flags & VBI_DIRTY) {
1930 listheadp = &vp->v_dirtyblkhd;
1931 } else {
1932 listheadp = &vp->v_cleanblkhd;
1933 }
1934
1935 while (!LIST_EMPTY(iterheadp)) {
1936 bp = LIST_FIRST(iterheadp);
1937 LIST_REMOVE(bp, b_vnbufs);
1938 LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
1939 }
1940 vp->v_iterblkflags &= ~VBI_ITER;
1941
1942 if (vp->v_iterblkflags & VBI_ITERWANT) {
1943 vp->v_iterblkflags &= ~VBI_ITERWANT;
1944 wakeup(&vp->v_iterblkflags);
1945 }
1946 }
1947
1948
1949 static void
1950 bremfree_locked(buf_t bp)
1951 {
1952 struct bqueues *dp = NULL;
1953 int whichq;
1954
1955 whichq = bp->b_whichq;
1956
1957 if (whichq == -1) {
1958 if (bp->b_shadow_ref == 0) {
1959 panic("bremfree_locked: %p not on freelist", bp);
1960 }
1961 /*
1962 * there are clones pointing to 'bp'...
1963 * therefore, it was not put on a freelist
1964 * when buf_brelse was last called on 'bp'
1965 */
1966 return;
1967 }
1968 /*
1969 * We only calculate the head of the freelist when removing
1970 * the last element of the list as that is the only time that
1971 * it is needed (e.g. to reset the tail pointer).
1972 *
1973 * NB: This makes an assumption about how tailq's are implemented.
1974 */
1975 if (bp->b_freelist.tqe_next == NULL) {
1976 dp = &bufqueues[whichq];
1977
1978 if (dp->tqh_last != &bp->b_freelist.tqe_next) {
1979 panic("bremfree: lost tail");
1980 }
1981 }
1982 TAILQ_REMOVE(dp, bp, b_freelist);
1983
1984 if (whichq == BQ_LAUNDRY) {
1985 blaundrycnt--;
1986 }
1987
1988 bp->b_whichq = -1;
1989 bp->b_timestamp = 0;
1990 bp->b_shadow = 0;
1991 }
1992
1993 /*
1994 * Associate a buffer with a vnode.
1995 * buf_mtxp must be locked on entry
1996 */
1997 static void
1998 bgetvp_locked(vnode_t vp, buf_t bp)
1999 {
2000 if (bp->b_vp != vp) {
2001 panic("bgetvp_locked: not free");
2002 }
2003
2004 if (vp->v_type == VBLK || vp->v_type == VCHR) {
2005 bp->b_dev = vp->v_rdev;
2006 } else {
2007 bp->b_dev = NODEV;
2008 }
2009 /*
2010 * Insert onto list for new vnode.
2011 */
2012 bufinsvn(bp, &vp->v_cleanblkhd);
2013 }
2014
2015 /*
2016 * Disassociate a buffer from a vnode.
2017 * buf_mtxp must be locked on entry
2018 */
2019 static void
2020 brelvp_locked(buf_t bp)
2021 {
2022 /*
2023 * Delete from old vnode list, if on one.
2024 */
2025 if (bp->b_vnbufs.le_next != NOLIST) {
2026 bufremvn(bp);
2027 }
2028
2029 bp->b_vp = (vnode_t)NULL;
2030 }
2031
2032 /*
2033 * Reassign a buffer from one vnode to another.
2034 * Used to assign file specific control information
2035 * (indirect blocks) to the vnode to which they belong.
2036 */
2037 static void
2038 buf_reassign(buf_t bp, vnode_t newvp)
2039 {
2040 struct buflists *listheadp;
2041
2042 if (newvp == NULL) {
2043 printf("buf_reassign: NULL");
2044 return;
2045 }
2046 lck_mtx_lock_spin(buf_mtxp);
2047
2048 /*
2049 * Delete from old vnode list, if on one.
2050 */
2051 if (bp->b_vnbufs.le_next != NOLIST) {
2052 bufremvn(bp);
2053 }
2054 /*
2055 * If dirty, put on list of dirty buffers;
2056 * otherwise insert onto list of clean buffers.
2057 */
2058 if (ISSET(bp->b_flags, B_DELWRI)) {
2059 listheadp = &newvp->v_dirtyblkhd;
2060 } else {
2061 listheadp = &newvp->v_cleanblkhd;
2062 }
2063 bufinsvn(bp, listheadp);
2064
2065 lck_mtx_unlock(buf_mtxp);
2066 }
2067
2068 static __inline__ void
2069 bufhdrinit(buf_t bp)
2070 {
2071 bzero((char *)bp, sizeof *bp);
2072 bp->b_dev = NODEV;
2073 bp->b_rcred = NOCRED;
2074 bp->b_wcred = NOCRED;
2075 bp->b_vnbufs.le_next = NOLIST;
2076 bp->b_flags = B_INVAL;
2077
2078 return;
2079 }
2080
2081 /*
2082 * Initialize buffers and hash links for buffers.
2083 */
2084 __private_extern__ void
2085 bufinit(void)
2086 {
2087 buf_t bp;
2088 struct bqueues *dp;
2089 int i;
2090
2091 nbuf_headers = 0;
2092 /* Initialize the buffer queues ('freelists') and the hash table */
2093 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
2094 TAILQ_INIT(dp);
2095 }
2096 bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);
2097
2098 buf_busycount = 0;
2099
2100 /* Initialize the buffer headers */
2101 for (i = 0; i < max_nbuf_headers; i++) {
2102 nbuf_headers++;
2103 bp = &buf_headers[i];
2104 bufhdrinit(bp);
2105
2106 BLISTNONE(bp);
2107 dp = &bufqueues[BQ_EMPTY];
2108 bp->b_whichq = BQ_EMPTY;
2109 bp->b_timestamp = buf_timestamp();
2110 binsheadfree(bp, dp, BQ_EMPTY);
2111 binshash(bp, &invalhash);
2112 }
2113 boot_nbuf_headers = nbuf_headers;
2114
2115 TAILQ_INIT(&iobufqueue);
2116 TAILQ_INIT(&delaybufqueue);
2117
2118 for (; i < nbuf_headers + niobuf_headers; i++) {
2119 bp = &buf_headers[i];
2120 bufhdrinit(bp);
2121 bp->b_whichq = -1;
2122 binsheadfree(bp, &iobufqueue, -1);
2123 }
2124
2125 /*
2126 * allocate lock group attribute and group
2127 */
2128 buf_mtx_grp_attr = lck_grp_attr_alloc_init();
2129 buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr);
2130
2131 /*
2132 * allocate the lock attribute
2133 */
2134 buf_mtx_attr = lck_attr_alloc_init();
2135
2136 /*
2137 * allocate and initialize mutex's for the buffer and iobuffer pools
2138 */
2139 buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
2140 iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
2141 buf_gc_callout = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr);
2142
2143 if (iobuffer_mtxp == NULL) {
2144 panic("couldn't create iobuffer mutex");
2145 }
2146
2147 if (buf_mtxp == NULL) {
2148 panic("couldn't create buf mutex");
2149 }
2150
2151 if (buf_gc_callout == NULL) {
2152 panic("couldn't create buf_gc_callout mutex");
2153 }
2154
2155 /*
2156 * allocate and initialize cluster specific global locks...
2157 */
2158 cluster_init();
2159
2160 printf("using %d buffer headers and %d cluster IO buffer headers\n",
2161 nbuf_headers, niobuf_headers);
2162
2163 /* Set up zones used by the buffer cache */
2164 bufzoneinit();
2165
2166 /* start the bcleanbuf() thread */
2167 bcleanbuf_thread_init();
2168
2169 /* Register a callout for relieving vm pressure */
2170 if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
2171 panic("Couldn't register buffer cache callout for vm pressure!\n");
2172 }
2173 }
2174
2175 /*
2176 * Zones for the meta data buffers
2177 */
2178
2179 #define MINMETA 512
2180 #define MAXMETA 16384
2181
2182 struct meta_zone_entry {
2183 zone_t mz_zone;
2184 vm_size_t mz_size;
2185 vm_size_t mz_max;
2186 const char *mz_name;
2187 };
2188
2189 struct meta_zone_entry meta_zones[] = {
2190 {.mz_zone = NULL, .mz_size = (MINMETA * 1), .mz_max = 128 * (MINMETA * 1), .mz_name = "buf.512" },
2191 {.mz_zone = NULL, .mz_size = (MINMETA * 2), .mz_max = 64 * (MINMETA * 2), .mz_name = "buf.1024" },
2192 {.mz_zone = NULL, .mz_size = (MINMETA * 4), .mz_max = 16 * (MINMETA * 4), .mz_name = "buf.2048" },
2193 {.mz_zone = NULL, .mz_size = (MINMETA * 8), .mz_max = 512 * (MINMETA * 8), .mz_name = "buf.4096" },
2194 {.mz_zone = NULL, .mz_size = (MINMETA * 16), .mz_max = 512 * (MINMETA * 16), .mz_name = "buf.8192" },
2195 {.mz_zone = NULL, .mz_size = (MINMETA * 32), .mz_max = 512 * (MINMETA * 32), .mz_name = "buf.16384" },
2196 {.mz_zone = NULL, .mz_size = 0, .mz_max = 0, .mz_name = "" } /* End */
2197 };
2198
2199 /*
2200 * Initialize the meta data zones
2201 */
2202 static void
2203 bufzoneinit(void)
2204 {
2205 int i;
2206
2207 for (i = 0; meta_zones[i].mz_size != 0; i++) {
2208 meta_zones[i].mz_zone =
2209 zinit(meta_zones[i].mz_size,
2210 meta_zones[i].mz_max,
2211 PAGE_SIZE,
2212 meta_zones[i].mz_name);
2213 zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE);
2214 }
2215 buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers");
2216 zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE);
2217 }
2218
2219 static __inline__ zone_t
2220 getbufzone(size_t size)
2221 {
2222 int i;
2223
2224 if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) {
2225 panic("getbufzone: incorrect size = %lu", size);
2226 }
2227
2228 for (i = 0; meta_zones[i].mz_size != 0; i++) {
2229 if (meta_zones[i].mz_size >= size) {
2230 break;
2231 }
2232 }
2233
2234 return meta_zones[i].mz_zone;
2235 }
2236
2237
2238
2239 static struct buf *
2240 bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
2241 {
2242 buf_t bp;
2243
2244 bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
2245
2246 /*
2247 * If buffer does not have data valid, start a read.
2248 * Note that if buffer is B_INVAL, buf_getblk() won't return it.
2249 * Therefore, it's valid if its I/O has completed or been delayed.
2250 */
2251 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
2252 struct proc *p;
2253
2254 p = current_proc();
2255
2256 /* Start I/O for the buffer (keeping credentials). */
2257 SET(bp->b_flags, B_READ | async);
2258 if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
2259 kauth_cred_ref(cred);
2260 bp->b_rcred = cred;
2261 }
2262
2263 VNOP_STRATEGY(bp);
2264
2265 trace(TR_BREADMISS, pack(vp, size), blkno);
2266
2267 /* Pay for the read. */
2268 if (p && p->p_stats) {
2269 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */
2270 }
2271
2272 if (async) {
2273 /*
2274 * since we asked for an ASYNC I/O
2275 * the biodone will do the brelse
2276 * we don't want to pass back a bp
2277 * that we don't 'own'
2278 */
2279 bp = NULL;
2280 }
2281 } else if (async) {
2282 buf_brelse(bp);
2283 bp = NULL;
2284 }
2285
2286 trace(TR_BREADHIT, pack(vp, size), blkno);
2287
2288 return bp;
2289 }
2290
2291 /*
2292 * Perform the reads for buf_breadn() and buf_meta_breadn().
2293 * Trivial modification to the breada algorithm presented in Bach (p.55).
2294 */
2295 static errno_t
2296 do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
2297 int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
2298 {
2299 buf_t bp;
2300 int i;
2301
2302 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
2303
2304 /*
2305 * For each of the read-ahead blocks, start a read, if necessary.
2306 */
2307 for (i = 0; i < nrablks; i++) {
2308 /* If it's in the cache, just go on to next one. */
2309 if (incore(vp, rablks[i])) {
2310 continue;
2311 }
2312
2313 /* Get a buffer for the read-ahead block */
2314 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
2315 }
2316
2317 /* Wait for the synchronous read of the first block to complete; return its result. */
2318 return buf_biowait(bp);
2319 }
2320
2321
2322 /*
2323 * Read a disk block.
2324 * This algorithm is described in Bach (p.54).
2325 */
2326 errno_t
2327 buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2328 {
2329 buf_t bp;
2330
2331 /* Get buffer for block. */
2332 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
2333
2334 /* Wait for the read to complete, and return result. */
2335 return buf_biowait(bp);
2336 }
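
/*
 * Illustrative sketch (placeholder names, not part of this file): the
 * typical synchronous-read pattern a file system builds on buf_bread().
 * buf_bread() always hands back a busy bp, even on error, so the caller
 * is responsible for releasing it.
 *
 *	buf_t	bp;
 *	errno_t	error;
 *
 *	if ((error = buf_bread(vp, blkno, blksize, NOCRED, &bp)) != 0) {
 *		buf_brelse(bp);
 *		return error;
 *	}
 *	// examine the data via buf_dataptr(bp), then release the buffer
 *	buf_brelse(bp);
 */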
2337
2338 /*
2339 * Read a disk block. [bread() for meta-data]
2340 * This algorithm is described in Bach (p.54).
2341 */
2342 errno_t
2343 buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2344 {
2345 buf_t bp;
2346
2347 /* Get buffer for block. */
2348 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
2349
2350 /* Wait for the read to complete, and return result. */
2351 return buf_biowait(bp);
2352 }
2353
2354 /*
2355 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2356 */
2357 errno_t
2358 buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2359 {
2360 return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ);
2361 }
2362
2363 /*
2364 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2365 * [buf_breadn() for meta-data]
2366 */
2367 errno_t
2368 buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2369 {
2370 return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META);
2371 }
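
/*
 * Illustrative sketch (placeholder names): a synchronous read of one block
 * combined with asynchronous read-ahead of the next two, as a sequentially
 * reading file system might issue it.  Only the first block is waited on.
 *
 *	daddr64_t	ra_blks[]  = { blkno + 1, blkno + 2 };
 *	int		ra_sizes[] = { blksize, blksize };
 *	buf_t		bp;
 *	errno_t		error;
 *
 *	error = buf_breadn(vp, blkno, blksize, ra_blks, ra_sizes, 2, NOCRED, &bp);
 */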
2372
2373 /*
2374 * Block write. Described in Bach (p.56)
2375 */
2376 errno_t
2377 buf_bwrite(buf_t bp)
2378 {
2379 int sync, wasdelayed;
2380 errno_t rv;
2381 proc_t p = current_proc();
2382 vnode_t vp = bp->b_vp;
2383
2384 if (bp->b_datap == 0) {
2385 if (brecover_data(bp) == 0) {
2386 return 0;
2387 }
2388 }
2389 /* Remember buffer type, to switch on it later. */
2390 sync = !ISSET(bp->b_flags, B_ASYNC);
2391 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
2392 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
2393
2394 if (wasdelayed) {
2395 OSAddAtomicLong(-1, &nbdwrite);
2396 }
2397
2398 if (!sync) {
2399 /*
2400 * If not synchronous, pay for the I/O operation and make
2401 * sure the buf is on the correct vnode queue. We have
2402 * to do this now, because if we don't, the vnode may not
2403 * be properly notified that its I/O has completed.
2404 */
2405 if (wasdelayed) {
2406 buf_reassign(bp, vp);
2407 } else if (p && p->p_stats) {
2408 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2409 }
2410 }
2411 trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
2412
2413 /* Initiate disk write. Make sure the appropriate party is charged. */
2414
2415 OSAddAtomic(1, &vp->v_numoutput);
2416
2417 VNOP_STRATEGY(bp);
2418
2419 if (sync) {
2420 /*
2421 * If I/O was synchronous, wait for it to complete.
2422 */
2423 rv = buf_biowait(bp);
2424
2425 /*
2426 * Pay for the I/O operation, if it's not been paid for, and
2427 * make sure it's on the correct vnode queue. (async operations
2428 * were paid for above.)
2429 */
2430 if (wasdelayed) {
2431 buf_reassign(bp, vp);
2432 } else if (p && p->p_stats) {
2433 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2434 }
2435
2436 /* Release the buffer. */
2437 buf_brelse(bp);
2438
2439 return rv;
2440 } else {
2441 return 0;
2442 }
2443 }
2444
2445 int
2446 vn_bwrite(struct vnop_bwrite_args *ap)
2447 {
2448 return buf_bwrite(ap->a_bp);
2449 }
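
/*
 * Illustrative sketch (placeholder names): the synchronous
 * read-modify-write cycle built on buf_bread()/buf_bwrite().  For a
 * synchronous buffer, buf_bwrite() waits for the I/O and releases the bp
 * itself; with B_ASYNC set it returns immediately after issuing the write.
 *
 *	if ((error = buf_meta_bread(vp, blkno, blksize, NOCRED, &bp)) != 0) {
 *		buf_brelse(bp);
 *		return error;
 *	}
 *	// update the block contents in place via buf_dataptr(bp) ...
 *	error = buf_bwrite(bp);		// waits, then releases bp
 */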
2450
2451 /*
2452 * Delayed write.
2453 *
2454 * The buffer is marked dirty, but is not queued for I/O.
2455 * This routine should be used when the buffer is expected
2456 * to be modified again soon, typically a small write that
2457 * partially fills a buffer.
2458 *
2459 * NB: magnetic tapes cannot be delayed; they must be
2460 * written in the order that the writes are requested.
2461 *
2462 * Described in Leffler, et al. (pp. 208-213).
2463 *
2464 * Note: With the ability to allocate additional buffer
2465 * headers, we can get into a situation where "too many"
2466 * buf_bdwrite()s allow the kernel to create buffers faster
2467 * than the disks can service them. Doing a buf_bawrite() in
2468 * cases where we have "too many" outstanding buf_bdwrite()s avoids that.
2469 */
2470 int
2471 bdwrite_internal(buf_t bp, int return_error)
2472 {
2473 proc_t p = current_proc();
2474 vnode_t vp = bp->b_vp;
2475
2476 /*
2477 * If the block hasn't been seen before:
2478 * (1) Mark it as having been seen,
2479 * (2) Charge for the write, and
2480 * (3) Make sure it's on its vnode's correct block list.
2481 */
2482 if (!ISSET(bp->b_flags, B_DELWRI)) {
2483 SET(bp->b_flags, B_DELWRI);
2484 if (p && p->p_stats) {
2485 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2486 }
2487 OSAddAtomicLong(1, &nbdwrite);
2488 buf_reassign(bp, vp);
2489 }
2490
2491 /*
2492 * if we're not LOCKED, but the total number of delayed writes
2493 * has climbed above 75% of the total buffers in the system,
2494 * return an error if the caller has indicated that it can
2495 * handle one in this case; otherwise schedule the I/O now.
2496 * this is done to prevent us from allocating tons of extra
2497 * buffers when dealing with virtual disks (i.e. DiskImages),
2498 * because additional buffers are dynamically allocated to prevent
2499 * deadlocks from occurring
2500 *
2501 * however, can't do a buf_bawrite() if the LOCKED bit is set because the
2502 * buffer is part of a transaction and can't go to disk until
2503 * the LOCKED bit is cleared.
2504 */
2505 if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers / 4) * 3)) {
2506 if (return_error) {
2507 return EAGAIN;
2508 }
2509 /*
2510 * If the vnode has "too many" write operations in progress
2511 * wait for them to finish the IO
2512 */
2513 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite");
2514
2515 return buf_bawrite(bp);
2516 }
2517
2518 /* Otherwise, the "write" is done, so mark and release the buffer. */
2519 SET(bp->b_flags, B_DONE);
2520 buf_brelse(bp);
2521 return 0;
2522 }
2523
2524 errno_t
2525 buf_bdwrite(buf_t bp)
2526 {
2527 return bdwrite_internal(bp, 0);
2528 }
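
/*
 * Illustrative sketch: a small metadata update that is likely to be touched
 * again soon is better issued as a delayed write; buf_bdwrite() marks the
 * buffer B_DELWRI and releases it without starting I/O, leaving the actual
 * write to a later sync, buf_bwrite(), or the laundry thread.
 *
 *	// bp obtained from buf_meta_bread()/buf_getblk(); names are placeholders
 *	bcopy(&new_entry, (void *)buf_dataptr(bp), sizeof(new_entry));
 *	buf_bdwrite(bp);		// no I/O yet; bp is released
 */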
2529
2530
2531 /*
2532 * Asynchronous block write; just an asynchronous buf_bwrite().
2533 *
2534 * Note: With the ability to allocate additional buffer
2535 * headers, we can get into a situation where "too many"
2536 * buf_bawrite()s allow the kernel to create buffers faster
2537 * than the disks can service them.
2538 * We limit the number of "in flight" writes a vnode can have to
2539 * avoid this.
2540 */
2541 static int
2542 bawrite_internal(buf_t bp, int throttle)
2543 {
2544 vnode_t vp = bp->b_vp;
2545
2546 if (vp) {
2547 if (throttle) {
2548 /*
2549 * If the vnode has "too many" write operations in progress
2550 * wait for them to finish the IO
2551 */
2552 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
2553 } else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) {
2554 /*
2555 * return to the caller and
2556 * let him decide what to do
2557 */
2558 return EWOULDBLOCK;
2559 }
2560 }
2561 SET(bp->b_flags, B_ASYNC);
2562
2563 return VNOP_BWRITE(bp);
2564 }
2565
2566 errno_t
2567 buf_bawrite(buf_t bp)
2568 {
2569 return bawrite_internal(bp, 1);
2570 }
2571
2572
2573
2574 static void
2575 buf_free_meta_store(buf_t bp)
2576 {
2577 if (bp->b_bufsize) {
2578 if (ISSET(bp->b_flags, B_ZALLOC)) {
2579 zone_t z;
2580
2581 z = getbufzone(bp->b_bufsize);
2582 zfree(z, bp->b_datap);
2583 } else {
2584 kmem_free(kernel_map, bp->b_datap, bp->b_bufsize);
2585 }
2586
2587 bp->b_datap = (uintptr_t)NULL;
2588 bp->b_bufsize = 0;
2589 }
2590 }
2591
2592
2593 static buf_t
2594 buf_brelse_shadow(buf_t bp)
2595 {
2596 buf_t bp_head;
2597 buf_t bp_temp;
2598 buf_t bp_return = NULL;
2599 #ifdef BUF_MAKE_PRIVATE
2600 buf_t bp_data;
2601 int data_ref = 0;
2602 #endif
2603 int need_wakeup = 0;
2604
2605 lck_mtx_lock_spin(buf_mtxp);
2606
2607 __IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig);
2608
2609 if (bp_head->b_whichq != -1) {
2610 panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq);
2611 }
2612
2613 #ifdef BUF_MAKE_PRIVATE
2614 if ((bp_data = bp->b_data_store)) {
2615 bp_data->b_data_ref--;
2616 /*
2617 * snapshot the ref count so that we can check it
2618 * outside of the lock... we only want the guy going
2619 * from 1 -> 0 to try and release the storage
2620 */
2621 data_ref = bp_data->b_data_ref;
2622 }
2623 #endif
2624 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
2625
2626 bp_head->b_shadow_ref--;
2627
2628 for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow) {
2629 ;
2630 }
2631
2632 if (bp_temp == NULL) {
2633 panic("buf_brelse_shadow: bp not on list %p", bp_head);
2634 }
2635
2636 bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
2637
2638 #ifdef BUF_MAKE_PRIVATE
2639 /*
2640 * we're about to free the current 'owner' of the data buffer and
2641 * there is at least one other shadow buf_t still pointing at it
2642 * so transfer it to the first shadow buf left in the chain
2643 */
2644 if (bp == bp_data && data_ref) {
2645 if ((bp_data = bp_head->b_shadow) == NULL) {
2646 panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
2647 }
2648
2649 for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) {
2650 bp_temp->b_data_store = bp_data;
2651 }
2652 bp_data->b_data_ref = data_ref;
2653 }
2654 #endif
2655 if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) {
2656 panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
2657 }
2658 if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) {
2659 panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
2660 }
2661
2662 if (bp_head->b_shadow_ref == 0) {
2663 if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
2664 CLR(bp_head->b_flags, B_AGE);
2665 bp_head->b_timestamp = buf_timestamp();
2666
2667 if (ISSET(bp_head->b_flags, B_LOCKED)) {
2668 bp_head->b_whichq = BQ_LOCKED;
2669 binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED);
2670 } else {
2671 bp_head->b_whichq = BQ_META;
2672 binstailfree(bp_head, &bufqueues[BQ_META], BQ_META);
2673 }
2674 } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) {
2675 CLR(bp_head->b_lflags, BL_WAITSHADOW);
2676
2677 bp_return = bp_head;
2678 }
2679 if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) {
2680 CLR(bp_head->b_lflags, BL_WANTED_REF);
2681 need_wakeup = 1;
2682 }
2683 }
2684 lck_mtx_unlock(buf_mtxp);
2685
2686 if (need_wakeup) {
2687 wakeup(bp_head);
2688 }
2689
2690 #ifdef BUF_MAKE_PRIVATE
2691 if (bp == bp_data && data_ref == 0) {
2692 buf_free_meta_store(bp);
2693 }
2694
2695 bp->b_data_store = NULL;
2696 #endif
2697 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
2698
2699 return bp_return;
2700 }
2701
2702
2703 /*
2704 * Release a buffer on to the free lists.
2705 * Described in Bach (p. 46).
2706 */
2707 void
2708 buf_brelse(buf_t bp)
2709 {
2710 struct bqueues *bufq;
2711 long whichq;
2712 upl_t upl;
2713 int need_wakeup = 0;
2714 int need_bp_wakeup = 0;
2715
2716
2717 if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) {
2718 panic("buf_brelse: bad buffer = %p\n", bp);
2719 }
2720
2721 #ifdef JOE_DEBUG
2722 (void) OSBacktrace(&bp->b_stackbrelse[0], 6);
2723
2724 bp->b_lastbrelse = current_thread();
2725 bp->b_tag = 0;
2726 #endif
2727 if (bp->b_lflags & BL_IOBUF) {
2728 buf_t shadow_master_bp = NULL;
2729
2730 if (ISSET(bp->b_lflags, BL_SHADOW)) {
2731 shadow_master_bp = buf_brelse_shadow(bp);
2732 } else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) {
2733 buf_free_meta_store(bp);
2734 }
2735 free_io_buf(bp);
2736
2737 if (shadow_master_bp) {
2738 bp = shadow_master_bp;
2739 goto finish_shadow_master;
2740 }
2741 return;
2742 }
2743
2744 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
2745 bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
2746 bp->b_flags, 0);
2747
2748 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2749
2750 /*
2751 * if we're invalidating a buffer that has the B_FILTER bit
2752 * set then call the b_iodone function so it gets cleaned
2753 * up properly.
2754 *
2755 * the HFS journal code depends on this
2756 */
2757 if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
2758 if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
2759 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
2760 void *arg = bp->b_transaction;
2761
2762 CLR(bp->b_flags, B_FILTER); /* but note callout done */
2763 bp->b_iodone = NULL;
2764 bp->b_transaction = NULL;
2765
2766 if (iodone_func == NULL) {
2767 panic("brelse: bp @ %p has NULL b_iodone!\n", bp);
2768 }
2769 (*iodone_func)(bp, arg);
2770 }
2771 }
2772 /*
2773 * I/O is done. Cleanup the UPL state
2774 */
2775 upl = bp->b_upl;
2776
2777 if (!ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
2778 kern_return_t kret;
2779 int upl_flags;
2780
2781 if (upl == NULL) {
2782 if (!ISSET(bp->b_flags, B_INVAL)) {
2783 kret = ubc_create_upl_kernel(bp->b_vp,
2784 ubc_blktooff(bp->b_vp, bp->b_lblkno),
2785 bp->b_bufsize,
2786 &upl,
2787 NULL,
2788 UPL_PRECIOUS,
2789 VM_KERN_MEMORY_FILE);
2790
2791 if (kret != KERN_SUCCESS) {
2792 panic("brelse: Failed to create UPL");
2793 }
2794 #if UPL_DEBUG
2795 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
2796 #endif /* UPL_DEBUG */
2797 }
2798 } else {
2799 if (bp->b_datap) {
2800 kret = ubc_upl_unmap(upl);
2801
2802 if (kret != KERN_SUCCESS) {
2803 panic("ubc_upl_unmap failed");
2804 }
2805 bp->b_datap = (uintptr_t)NULL;
2806 }
2807 }
2808 if (upl) {
2809 if (bp->b_flags & (B_ERROR | B_INVAL)) {
2810 if (bp->b_flags & (B_READ | B_INVAL)) {
2811 upl_flags = UPL_ABORT_DUMP_PAGES;
2812 } else {
2813 upl_flags = 0;
2814 }
2815
2816 ubc_upl_abort(upl, upl_flags);
2817 } else {
2818 if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) {
2819 upl_flags = UPL_COMMIT_SET_DIRTY;
2820 } else {
2821 upl_flags = UPL_COMMIT_CLEAR_DIRTY;
2822 }
2823
2824 ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
2825 UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
2826 }
2827 bp->b_upl = NULL;
2828 }
2829 } else {
2830 if ((upl)) {
2831 panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
2832 }
2833 }
2834
2835 /*
2836 * If it's locked, don't report an error; try again later.
2837 */
2838 if (ISSET(bp->b_flags, (B_LOCKED | B_ERROR)) == (B_LOCKED | B_ERROR)) {
2839 CLR(bp->b_flags, B_ERROR);
2840 }
2841 /*
2842 * If it's not cacheable, or an error, mark it invalid.
2843 */
2844 if (ISSET(bp->b_flags, (B_NOCACHE | B_ERROR))) {
2845 SET(bp->b_flags, B_INVAL);
2846 }
2847
2848 if ((bp->b_bufsize <= 0) ||
2849 ISSET(bp->b_flags, B_INVAL) ||
2850 (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
2851 boolean_t delayed_buf_free_meta_store = FALSE;
2852
2853 /*
2854 * If it's invalid or empty, dissociate it from its vnode,
2855 * release its storage if B_META,
2856 * clean it up a bit, and put it on the EMPTY queue
2857 */
2858 if (ISSET(bp->b_flags, B_DELWRI)) {
2859 OSAddAtomicLong(-1, &nbdwrite);
2860 }
2861
2862 if (ISSET(bp->b_flags, B_META)) {
2863 if (bp->b_shadow_ref) {
2864 delayed_buf_free_meta_store = TRUE;
2865 } else {
2866 buf_free_meta_store(bp);
2867 }
2868 }
2869 /*
2870 * nuke any credentials we were holding
2871 */
2872 buf_release_credentials(bp);
2873
2874 lck_mtx_lock_spin(buf_mtxp);
2875
2876 if (bp->b_shadow_ref) {
2877 SET(bp->b_lflags, BL_WAITSHADOW);
2878
2879 lck_mtx_unlock(buf_mtxp);
2880
2881 return;
2882 }
2883 if (delayed_buf_free_meta_store == TRUE) {
2884 lck_mtx_unlock(buf_mtxp);
2885 finish_shadow_master:
2886 buf_free_meta_store(bp);
2887
2888 lck_mtx_lock_spin(buf_mtxp);
2889 }
2890 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
2891
2892 if (bp->b_vp) {
2893 brelvp_locked(bp);
2894 }
2895
2896 bremhash(bp);
2897 BLISTNONE(bp);
2898 binshash(bp, &invalhash);
2899
2900 bp->b_whichq = BQ_EMPTY;
2901 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
2902 } else {
2903 /*
2904 * It has valid data. Put it on the end of the appropriate
2905 * queue, so that it'll stick around for as long as possible.
2906 */
2907 if (ISSET(bp->b_flags, B_LOCKED)) {
2908 whichq = BQ_LOCKED; /* locked in core */
2909 } else if (ISSET(bp->b_flags, B_META)) {
2910 whichq = BQ_META; /* meta-data */
2911 } else if (ISSET(bp->b_flags, B_AGE)) {
2912 whichq = BQ_AGE; /* stale but valid data */
2913 } else {
2914 whichq = BQ_LRU; /* valid data */
2915 }
2916 bufq = &bufqueues[whichq];
2917
2918 bp->b_timestamp = buf_timestamp();
2919
2920 lck_mtx_lock_spin(buf_mtxp);
2921
2922 /*
2923 * the buf_brelse_shadow routine doesn't take 'ownership'
2924 * of the parent buf_t... it updates state that is protected by
2925 * the buf_mtxp, and checks for BL_BUSY to determine whether to
2926 * put the buf_t back on a free list. b_shadow_ref is protected
2927 * by the lock, and since we have not yet cleared B_BUSY, we need
2928 * to check it while holding the lock to ensure that one of us
2929 * puts this buf_t back on a free list when it is safe to do so
2930 */
2931 if (bp->b_shadow_ref == 0) {
2932 CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
2933 bp->b_whichq = whichq;
2934 binstailfree(bp, bufq, whichq);
2935 } else {
2936 /*
2937 * there are still cloned buf_t's pointing
2938 * at this guy... need to keep it off the
2939 * freelists until a buf_brelse is done on
2940 * the last clone
2941 */
2942 CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
2943 }
2944 }
2945 if (needbuffer) {
2946 /*
2947 * needbuffer is a global
2948 * we're currently using buf_mtxp to protect it
2949 * delay doing the actual wakeup until after
2950 * we drop buf_mtxp
2951 */
2952 needbuffer = 0;
2953 need_wakeup = 1;
2954 }
2955 if (ISSET(bp->b_lflags, BL_WANTED)) {
2956 /*
2957 * delay the actual wakeup until after we
2958 * clear BL_BUSY and we've dropped buf_mtxp
2959 */
2960 need_bp_wakeup = 1;
2961 }
2962 /*
2963 * Unlock the buffer.
2964 */
2965 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
2966 buf_busycount--;
2967
2968 lck_mtx_unlock(buf_mtxp);
2969
2970 if (need_wakeup) {
2971 /*
2972 * Wake up any processes waiting for any buffer to become free.
2973 */
2974 wakeup(&needbuffer);
2975 }
2976 if (need_bp_wakeup) {
2977 /*
2978 * Wake up any processes waiting for _this_ buffer to become free.
2979 */
2980 wakeup(bp);
2981 }
2982 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
2983 bp, bp->b_datap, bp->b_flags, 0, 0);
2984 }
2985
2986 /*
2987 * Determine if a block is in the cache.
2988 * Just look on what would be its hash chain. If it's there, return
2989 * a pointer to it, unless it's marked invalid. If it's marked invalid,
2990 * we normally don't return the buffer, unless the caller explicitly
2991 * wants us to.
2992 */
2993 static boolean_t
2994 incore(vnode_t vp, daddr64_t blkno)
2995 {
2996 boolean_t retval;
2997 struct bufhashhdr *dp;
2998
2999 dp = BUFHASH(vp, blkno);
3000
3001 lck_mtx_lock_spin(buf_mtxp);
3002
3003 if (incore_locked(vp, blkno, dp)) {
3004 retval = TRUE;
3005 } else {
3006 retval = FALSE;
3007 }
3008 lck_mtx_unlock(buf_mtxp);
3009
3010 return retval;
3011 }
3012
3013
3014 static buf_t
3015 incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp)
3016 {
3017 struct buf *bp;
3018
3019 /* Search hash chain */
3020 for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
3021 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
3022 !ISSET(bp->b_flags, B_INVAL)) {
3023 return bp;
3024 }
3025 }
3026 return NULL;
3027 }
3028
3029
3030 void
3031 buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno)
3032 {
3033 buf_t bp;
3034 struct bufhashhdr *dp;
3035
3036 dp = BUFHASH(vp, blkno);
3037
3038 lck_mtx_lock_spin(buf_mtxp);
3039
3040 for (;;) {
3041 if ((bp = incore_locked(vp, blkno, dp)) == NULL) {
3042 break;
3043 }
3044
3045 if (bp->b_shadow_ref == 0) {
3046 break;
3047 }
3048
3049 SET(bp->b_lflags, BL_WANTED_REF);
3050
3051 (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO + 1), "buf_wait_for_shadow", NULL);
3052 }
3053 lck_mtx_unlock(buf_mtxp);
3054 }
3055
3056 /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
3057 /*
3058 * Get a block of requested size that is associated with
3059 * a given vnode and block offset. If it is found in the
3060 * block cache, mark it as having been found, make it busy
3061 * and return it. Otherwise, return an empty block of the
3062 * correct size. It is up to the caller to insure that the
3063 * cached blocks be of the correct size.
3064 */
3065 buf_t
3066 buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation)
3067 {
3068 buf_t bp;
3069 int err;
3070 upl_t upl;
3071 upl_page_info_t *pl;
3072 kern_return_t kret;
3073 int ret_only_valid;
3074 struct timespec ts;
3075 int upl_flags;
3076 struct bufhashhdr *dp;
3077
3078 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
3079 (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
3080
3081 ret_only_valid = operation & BLK_ONLYVALID;
3082 operation &= ~BLK_ONLYVALID;
3083 dp = BUFHASH(vp, blkno);
3084 start:
3085 lck_mtx_lock_spin(buf_mtxp);
3086
3087 if ((bp = incore_locked(vp, blkno, dp))) {
3088 /*
3089 * Found in the Buffer Cache
3090 */
3091 if (ISSET(bp->b_lflags, BL_BUSY)) {
3092 /*
3093 * but is busy
3094 */
3095 switch (operation) {
3096 case BLK_READ:
3097 case BLK_WRITE:
3098 case BLK_META:
3099 SET(bp->b_lflags, BL_WANTED);
3100 bufstats.bufs_busyincore++;
3101
3102 /*
3103 * don't retake the mutex after being awakened...
3104 * the timeout is in msecs
3105 */
3106 ts.tv_sec = (slptimeo / 1000);
3107 ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000;
3108
3109 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE,
3110 (uintptr_t)blkno, size, operation, 0, 0);
3111
3112 err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
3113
3114 /*
3115 * Callers who call with PCATCH or timeout are
3116 * willing to deal with the NULL pointer
3117 */
3118 if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) {
3119 return NULL;
3120 }
3121 goto start;
3122 /*NOTREACHED*/
3123
3124 default:
3125 /*
3126 * unknown operation requested
3127 */
3128 panic("getblk: paging or unknown operation for incore busy buffer - %x\n", operation);
3129 /*NOTREACHED*/
3130 break;
3131 }
3132 } else {
3133 int clear_bdone;
3134
3135 /*
3136 * buffer in core and not busy
3137 */
3138 SET(bp->b_lflags, BL_BUSY);
3139 SET(bp->b_flags, B_CACHE);
3140 buf_busycount++;
3141
3142 bremfree_locked(bp);
3143 bufstats.bufs_incore++;
3144
3145 lck_mtx_unlock(buf_mtxp);
3146 #ifdef JOE_DEBUG
3147 bp->b_owner = current_thread();
3148 bp->b_tag = 1;
3149 #endif
3150 if ((bp->b_upl)) {
3151 panic("buffer has UPL, but not marked BUSY: %p", bp);
3152 }
3153
3154 clear_bdone = FALSE;
3155 if (!ret_only_valid) {
3156 /*
3157 * If the number of bytes that are valid is going
3158 * to increase (even if we end up not doing a
3159 * reallocation through allocbuf) we have to read
3160 * the new size first.
3161 *
3162 * This is required in cases where we are doing a read-
3163 * modify-write of already-valid data on disk, but
3164 * in cases where the data on disk beyond (blkno + b_bcount)
3165 * is invalid, we may end up doing extra I/O.
3166 */
3167 if (operation == BLK_META && bp->b_bcount < size) {
3168 /*
3169 * Since we are going to read in the whole size first
3170 * we first have to ensure that any pending delayed write
3171 * is flushed to disk first.
3172 */
3173 if (ISSET(bp->b_flags, B_DELWRI)) {
3174 CLR(bp->b_flags, B_CACHE);
3175 buf_bwrite(bp);
3176 goto start;
3177 }
3178 /*
3179 * clear B_DONE before returning from
3180 * this function so that the caller
3181 * can issue a read for the new size.
3182 */
3183 clear_bdone = TRUE;
3184 }
3185
3186 if (bp->b_bufsize != size) {
3187 allocbuf(bp, size);
3188 }
3189 }
3190
3191 upl_flags = 0;
3192 switch (operation) {
3193 case BLK_WRITE:
3194 /*
3195 * "write" operation: let the UPL subsystem
3196 * know that we intend to modify the buffer
3197 * cache pages we're gathering.
3198 */
3199 upl_flags |= UPL_WILL_MODIFY;
3200 case BLK_READ:
3201 upl_flags |= UPL_PRECIOUS;
3202 if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
3203 kret = ubc_create_upl_kernel(vp,
3204 ubc_blktooff(vp, bp->b_lblkno),
3205 bp->b_bufsize,
3206 &upl,
3207 &pl,
3208 upl_flags,
3209 VM_KERN_MEMORY_FILE);
3210 if (kret != KERN_SUCCESS) {
3211 panic("Failed to create UPL");
3212 }
3213
3214 bp->b_upl = upl;
3215
3216 if (upl_valid_page(pl, 0)) {
3217 if (upl_dirty_page(pl, 0)) {
3218 SET(bp->b_flags, B_WASDIRTY);
3219 } else {
3220 CLR(bp->b_flags, B_WASDIRTY);
3221 }
3222 } else {
3223 CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
3224 }
3225
3226 kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
3227
3228 if (kret != KERN_SUCCESS) {
3229 panic("getblk: ubc_upl_map() failed with (%d)", kret);
3230 }
3231 }
3232 break;
3233
3234 case BLK_META:
3235 /*
3236 * VM is not involved in IO for the meta data
3237 * buffer already has valid data
3238 */
3239 break;
3240
3241 default:
3242 panic("getblk: paging or unknown operation for incore buffer- %d\n", operation);
3243 /*NOTREACHED*/
3244 break;
3245 }
3246
3247 if (clear_bdone) {
3248 CLR(bp->b_flags, B_DONE);
3249 }
3250 }
3251 } else { /* not incore() */
3252 int queue = BQ_EMPTY; /* Start with no preference */
3253
3254 if (ret_only_valid) {
3255 lck_mtx_unlock(buf_mtxp);
3256 return NULL;
3257 }
3258 if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) {
3259 operation = BLK_META;
3260 }
3261
3262 if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) {
3263 goto start;
3264 }
3265
3266 /*
3267 * getnewbuf may block for a number of different reasons...
3268 * if it does, it's then possible for someone else to
3269 * create a buffer for the same block and insert it into
3270 * the hash... if we see it incore at this point we dump
3271 * the buffer we were working on and start over
3272 */
3273 if (incore_locked(vp, blkno, dp)) {
3274 SET(bp->b_flags, B_INVAL);
3275 binshash(bp, &invalhash);
3276
3277 lck_mtx_unlock(buf_mtxp);
3278
3279 buf_brelse(bp);
3280 goto start;
3281 }
3282 /*
3283 * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN
3284 * CALLED! BE CAREFUL.
3285 */
3286
3287 /*
3288 * mark the buffer as B_META if indicated
3289 * so that when buffer is released it will goto META queue
3290 */
3291 if (operation == BLK_META) {
3292 SET(bp->b_flags, B_META);
3293 }
3294
3295 bp->b_blkno = bp->b_lblkno = blkno;
3296 bp->b_vp = vp;
3297
3298 /*
3299 * Insert in the hash so that incore() can find it
3300 */
3301 binshash(bp, BUFHASH(vp, blkno));
3302
3303 bgetvp_locked(vp, bp);
3304
3305 lck_mtx_unlock(buf_mtxp);
3306
3307 allocbuf(bp, size);
3308
3309 upl_flags = 0;
3310 switch (operation) {
3311 case BLK_META:
3312 /*
3313 * buffer data is invalid...
3314 *
3315 * I don't want to have to retake buf_mtxp,
3316 * so the miss and vmhits counters are done
3317 * with Atomic updates... all other counters
3318 * in bufstats are protected with either
3319 * buf_mtxp or iobuffer_mtxp
3320 */
3321 OSAddAtomicLong(1, &bufstats.bufs_miss);
3322 break;
3323
3324 case BLK_WRITE:
3325 /*
3326 * "write" operation: let the UPL subsystem know
3327 * that we intend to modify the buffer cache pages
3328 * we're gathering.
3329 */
3330 upl_flags |= UPL_WILL_MODIFY;
3331 case BLK_READ:
3332 { off_t f_offset;
3333 size_t contig_bytes;
3334 int bmap_flags;
3335
3336 #if DEVELOPMENT || DEBUG
3337 /*
3338 * Apple-implemented file systems use UBC exclusively; they should
3339 * not call in here.
3340 */
3341 const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs",
3342 "exfat", "msdos", "webdav", NULL};
3343
3344 for (int i = 0; excldfs[i] != NULL; i++) {
3345 if (vp->v_mount &&
3346 !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename,
3347 excldfs[i])) {
3348 panic("%s %s calls buf_getblk",
3349 excldfs[i],
3350 operation == BLK_READ ? "BLK_READ" : "BLK_WRITE");
3351 }
3352 }
3353 #endif
3354
3355 if ((bp->b_upl)) {
3356 panic("bp already has UPL: %p", bp);
3357 }
3358
3359 f_offset = ubc_blktooff(vp, blkno);
3360
3361 upl_flags |= UPL_PRECIOUS;
3362 kret = ubc_create_upl_kernel(vp,
3363 f_offset,
3364 bp->b_bufsize,
3365 &upl,
3366 &pl,
3367 upl_flags,
3368 VM_KERN_MEMORY_FILE);
3369
3370 if (kret != KERN_SUCCESS) {
3371 panic("Failed to create UPL");
3372 }
3373 #if UPL_DEBUG
3374 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
3375 #endif /* UPL_DEBUG */
3376 bp->b_upl = upl;
3377
3378 if (upl_valid_page(pl, 0)) {
3379 if (operation == BLK_READ) {
3380 bmap_flags = VNODE_READ;
3381 } else {
3382 bmap_flags = VNODE_WRITE;
3383 }
3384
3385 SET(bp->b_flags, B_CACHE | B_DONE);
3386
3387 OSAddAtomicLong(1, &bufstats.bufs_vmhits);
3388
3389 bp->b_validoff = 0;
3390 bp->b_dirtyoff = 0;
3391
3392 if (upl_dirty_page(pl, 0)) {
3393 /* page is dirty */
3394 SET(bp->b_flags, B_WASDIRTY);
3395
3396 bp->b_validend = bp->b_bcount;
3397 bp->b_dirtyend = bp->b_bcount;
3398 } else {
3399 /* page is clean */
3400 bp->b_validend = bp->b_bcount;
3401 bp->b_dirtyend = 0;
3402 }
3403 /*
3404 * try to recreate the physical block number associated with
3405 * this buffer...
3406 */
3407 if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) {
3408 panic("getblk: VNOP_BLOCKMAP failed");
3409 }
3410 /*
3411 * if the extent represented by this buffer
3412 * is not completely physically contiguous on
3413 * disk, then we can't cache the physical mapping
3414 * in the buffer header
3415 */
3416 if ((long)contig_bytes < bp->b_bcount) {
3417 bp->b_blkno = bp->b_lblkno;
3418 }
3419 } else {
3420 OSAddAtomicLong(1, &bufstats.bufs_miss);
3421 }
3422 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
3423
3424 if (kret != KERN_SUCCESS) {
3425 panic("getblk: ubc_upl_map() failed with (%d)", kret);
3426 }
3427 break;} // end BLK_READ
3428 default:
3429 panic("getblk: paging or unknown operation - %x", operation);
3430 /*NOTREACHED*/
3431 break;
3432 } // end switch
3433 } //end buf_t !incore
3434
3435 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
3436 bp, bp->b_datap, bp->b_flags, 3, 0);
3437
3438 #ifdef JOE_DEBUG
3439 (void) OSBacktrace(&bp->b_stackgetblk[0], 6);
3440 #endif
3441 return bp;
3442 }
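
/*
 * Illustrative sketch: the classic getblk()-style lookup that bio_doread()
 * is built on.  Grab the block (creating an empty one on a miss) and only
 * start I/O if it does not already contain valid data.  Placeholder names;
 * buf_flags()/buf_setflags() are the exported accessors for b_flags.
 *
 *	bp = buf_getblk(vp, blkno, blksize, 0, 0, BLK_META);
 *
 *	if (!ISSET(buf_flags(bp), B_DONE | B_DELWRI)) {
 *		buf_setflags(bp, B_READ);
 *		VNOP_STRATEGY(bp);
 *		error = buf_biowait(bp);
 *	}
 */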
3443
3444 /*
3445 * Get an empty, disassociated buffer of given size.
3446 */
3447 buf_t
3448 buf_geteblk(int size)
3449 {
3450 buf_t bp = NULL;
3451 int queue = BQ_EMPTY;
3452
3453 do {
3454 lck_mtx_lock_spin(buf_mtxp);
3455
3456 bp = getnewbuf(0, 0, &queue);
3457 } while (bp == NULL);
3458
3459 SET(bp->b_flags, (B_META | B_INVAL));
3460
3461 #if DIAGNOSTIC
3462 assert(queue == BQ_EMPTY);
3463 #endif /* DIAGNOSTIC */
3464 /* XXX need to implement logic to deal with other queues */
3465
3466 binshash(bp, &invalhash);
3467 bufstats.bufs_eblk++;
3468
3469 lck_mtx_unlock(buf_mtxp);
3470
3471 allocbuf(bp, size);
3472
3473 return bp;
3474 }
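
/*
 * Illustrative sketch: buf_geteblk() hands back a private, invalid buffer
 * that is not associated with any vnode; it is handy as scratch space and
 * is simply released with buf_brelse() when done.
 *
 *	buf_t scratch = buf_geteblk(8192);
 *
 *	bzero((void *)buf_dataptr(scratch), 8192);
 *	// ... use the scratch space ...
 *	buf_brelse(scratch);
 */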
3475
3476 uint32_t
3477 buf_redundancy_flags(buf_t bp)
3478 {
3479 return bp->b_redundancy_flags;
3480 }
3481
3482 void
3483 buf_set_redundancy_flags(buf_t bp, uint32_t flags)
3484 {
3485 SET(bp->b_redundancy_flags, flags);
3486 }
3487
3488 void
3489 buf_clear_redundancy_flags(buf_t bp, uint32_t flags)
3490 {
3491 CLR(bp->b_redundancy_flags, flags);
3492 }
3493
3494
3495
3496 static void *
3497 recycle_buf_from_pool(int nsize)
3498 {
3499 buf_t bp;
3500 void *ptr = NULL;
3501
3502 lck_mtx_lock_spin(buf_mtxp);
3503
3504 TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) {
3505 if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize) {
3506 continue;
3507 }
3508 ptr = (void *)bp->b_datap;
3509 bp->b_bufsize = 0;
3510
3511 bcleanbuf(bp, TRUE);
3512 break;
3513 }
3514 lck_mtx_unlock(buf_mtxp);
3515
3516 return ptr;
3517 }
3518
3519
3520
3521 int zalloc_nopagewait_failed = 0;
3522 int recycle_buf_failed = 0;
3523
3524 static void *
3525 grab_memory_for_meta_buf(int nsize)
3526 {
3527 zone_t z;
3528 void *ptr;
3529 boolean_t was_vmpriv;
3530
3531 z = getbufzone(nsize);
3532
3533 /*
3534 * make sure we're NOT privileged so that
3535 * if a vm_page_grab is needed, it won't
3536 * block if we're out of free pages... if
3537 * it blocks, then we can't honor the
3538 * nopagewait request
3539 */
3540 was_vmpriv = set_vm_privilege(FALSE);
3541
3542 ptr = zalloc_nopagewait(z);
3543
3544 if (was_vmpriv == TRUE) {
3545 set_vm_privilege(TRUE);
3546 }
3547
3548 if (ptr == NULL) {
3549 zalloc_nopagewait_failed++;
3550
3551 ptr = recycle_buf_from_pool(nsize);
3552
3553 if (ptr == NULL) {
3554 recycle_buf_failed++;
3555
3556 if (was_vmpriv == FALSE) {
3557 set_vm_privilege(TRUE);
3558 }
3559
3560 ptr = zalloc(z);
3561
3562 if (was_vmpriv == FALSE) {
3563 set_vm_privilege(FALSE);
3564 }
3565 }
3566 }
3567 return ptr;
3568 }
3569
3570 /*
3571 * With UBC, there is no need to expand / shrink the file data
3572 * buffer. The VM uses the same pages, hence no waste.
3573 * All the file data buffers can have one size.
3574 * In fact expand / shrink would be an expensive operation.
3575 *
3576 * Only exception to this is meta-data buffers. Most of the
3577 * meta data operations are smaller than PAGE_SIZE. Having the
3578 * meta-data buffers grow and shrink as needed, optimizes use
3579 * of the kernel wired memory.
3580 */
3581
3582 int
3583 allocbuf(buf_t bp, int size)
3584 {
3585 vm_size_t desired_size;
3586
3587 desired_size = roundup(size, CLBYTES);
3588
3589 if (desired_size < PAGE_SIZE) {
3590 desired_size = PAGE_SIZE;
3591 }
3592 if (desired_size > MAXBSIZE) {
3593 panic("allocbuf: buffer larger than MAXBSIZE requested");
3594 }
3595
3596 if (ISSET(bp->b_flags, B_META)) {
3597 int nsize = roundup(size, MINMETA);
3598
3599 if (bp->b_datap) {
3600 vm_offset_t elem = (vm_offset_t)bp->b_datap;
3601
3602 if (ISSET(bp->b_flags, B_ZALLOC)) {
3603 if (bp->b_bufsize < nsize) {
3604 zone_t zprev;
3605
3606 /* reallocate to a bigger size */
3607
3608 zprev = getbufzone(bp->b_bufsize);
3609 if (nsize <= MAXMETA) {
3610 desired_size = nsize;
3611
3612 /* b_datap not really a ptr */
3613 *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
3614 } else {
3615 bp->b_datap = (uintptr_t)NULL;
3616 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
3617 CLR(bp->b_flags, B_ZALLOC);
3618 }
3619 bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3620 zfree(zprev, elem);
3621 } else {
3622 desired_size = bp->b_bufsize;
3623 }
3624 } else {
3625 if ((vm_size_t)bp->b_bufsize < desired_size) {
3626 /* reallocate to a bigger size */
3627 bp->b_datap = (uintptr_t)NULL;
3628 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
3629 bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3630 kmem_free(kernel_map, elem, bp->b_bufsize);
3631 } else {
3632 desired_size = bp->b_bufsize;
3633 }
3634 }
3635 } else {
3636 /* new allocation */
3637 if (nsize <= MAXMETA) {
3638 desired_size = nsize;
3639
3640 /* b_datap not really a ptr */
3641 *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
3642 SET(bp->b_flags, B_ZALLOC);
3643 } else {
3644 kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE);
3645 }
3646 }
3647
3648 if (bp->b_datap == 0) {
3649 panic("allocbuf: NULL b_datap");
3650 }
3651 }
3652 bp->b_bufsize = desired_size;
3653 bp->b_bcount = size;
3654
3655 return 0;
3656 }
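
/*
 * Illustrative sketch of the metadata growth path described above: growing
 * a zone-backed (B_ZALLOC) buffer moves it to the next larger zone (or to a
 * kmem allocation once the size exceeds MAXMETA) and copies the old
 * contents forward.
 *
 *	buf_t bp = buf_geteblk(1024);	// backed by the "buf.1024" zone
 *	allocbuf(bp, 4096);		// recopied into "buf.4096"; b_bcount = 4096
 *	allocbuf(bp, 32768);		// > MAXMETA: moves to a kmem allocation
 */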
3657
3658 /*
3659 * Get a new buffer from one of the free lists.
3660 *
3661 * A request for a queue is passed in. The queue from which the buffer was
3662 * taken is returned. Out of range queue requests get BQ_EMPTY. A request for
3663 * BQUEUE means no preference; use heuristics in that case.
3664 * The heuristic is as follows:
3665 * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
3666 * If none are available, block until one is made available.
3667 * If buffers are available on both BQ_AGE and BQ_LRU, check the timestamps.
3668 * Pick the most stale buffer.
3669 * If the found buffer was marked delayed write, start the async write
3670 * and restart the search.
3671 * Initialize the fields and disassociate the buffer from the vnode.
3672 * Remove the buffer from the hash. Return the buffer and the queue
3673 * on which it was found.
3674 *
3675 * buf_mtxp is held upon entry
3676 * returns with buf_mtxp locked if new buf available
3677 * returns with buf_mtxp UNlocked if new buf NOT available
3678 */
3679
3680 static buf_t
3681 getnewbuf(int slpflag, int slptimeo, int * queue)
3682 {
3683 buf_t bp;
3684 buf_t lru_bp;
3685 buf_t age_bp;
3686 buf_t meta_bp;
3687 int age_time, lru_time, bp_time, meta_time;
3688 int req = *queue; /* save it for restarts */
3689 struct timespec ts;
3690
3691 start:
3692 /*
3693 * invalid request gets empty queue
3694 */
3695 if ((*queue >= BQUEUES) || (*queue < 0)
3696 || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) {
3697 *queue = BQ_EMPTY;
3698 }
3699
3700
3701 if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) {
3702 goto found;
3703 }
3704
3705 /*
3706 * need to grow number of bufs, add another one rather than recycling
3707 */
3708 if (nbuf_headers < max_nbuf_headers) {
3709 /*
3710 * Increment count now as lock
3711 * is dropped for allocation.
3712 * That avoids over commits
3713 */
3714 nbuf_headers++;
3715 goto add_newbufs;
3716 }
3717 /* Try for the requested queue first */
3718 bp = bufqueues[*queue].tqh_first;
3719 if (bp) {
3720 goto found;
3721 }
3722
3723 /* Unable to use requested queue */
3724 age_bp = bufqueues[BQ_AGE].tqh_first;
3725 lru_bp = bufqueues[BQ_LRU].tqh_first;
3726 meta_bp = bufqueues[BQ_META].tqh_first;
3727
3728 if (!age_bp && !lru_bp && !meta_bp) {
3729 /*
3730 * Unavailable on AGE or LRU or META queues
3731 * Try the empty list first
3732 */
3733 bp = bufqueues[BQ_EMPTY].tqh_first;
3734 if (bp) {
3735 *queue = BQ_EMPTY;
3736 goto found;
3737 }
3738 /*
3739 * We have seen that this is hard to trigger.
3740 * This is an overcommit of nbufs but is needed
3741 * in some scenarios with disk images
3742 */
3743
3744 add_newbufs:
3745 lck_mtx_unlock(buf_mtxp);
3746
3747 /* Create a new temporary buffer header */
3748 bp = (struct buf *)zalloc(buf_hdr_zone);
3749
3750 if (bp) {
3751 bufhdrinit(bp);
3752 bp->b_whichq = BQ_EMPTY;
3753 bp->b_timestamp = buf_timestamp();
3754 BLISTNONE(bp);
3755 SET(bp->b_flags, B_HDRALLOC);
3756 *queue = BQ_EMPTY;
3757 }
3758 lck_mtx_lock_spin(buf_mtxp);
3759
3760 if (bp) {
3761 binshash(bp, &invalhash);
3762 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3763 buf_hdr_count++;
3764 goto found;
3765 }
3766 /* subtract already accounted bufcount */
3767 nbuf_headers--;
3768
3769 bufstats.bufs_sleeps++;
3770
3771 /* wait for a free buffer of any kind */
3772 needbuffer = 1;
3773 /* hz value is 100 */
3774 ts.tv_sec = (slptimeo / 1000);
3775 /* the hz value is 100; which leads to 10ms */
3776 ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
3777
3778 msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "getnewbuf", &ts);
3779 return NULL;
3780 }
3781
3782 /* Buffer available either on AGE or LRU or META */
3783 bp = NULL;
3784 *queue = -1;
3785
3786 /* Buffer available either on AGE or LRU */
3787 if (!age_bp) {
3788 bp = lru_bp;
3789 *queue = BQ_LRU;
3790 } else if (!lru_bp) {
3791 bp = age_bp;
3792 *queue = BQ_AGE;
3793 } else { /* buffer available on both AGE and LRU */
3794 int t = buf_timestamp();
3795
3796 age_time = t - age_bp->b_timestamp;
3797 lru_time = t - lru_bp->b_timestamp;
3798 if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
3799 bp = age_bp;
3800 *queue = BQ_AGE;
3801 /*
3802 * we should probably re-timestamp everything in the
3803 * queues at this point with the current time
3804 */
3805 } else {
3806 if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
3807 bp = lru_bp;
3808 *queue = BQ_LRU;
3809 } else {
3810 bp = age_bp;
3811 *queue = BQ_AGE;
3812 }
3813 }
3814 }
3815
3816 if (!bp) { /* Neither on AGE nor on LRU */
3817 bp = meta_bp;
3818 *queue = BQ_META;
3819 } else if (meta_bp) {
3820 int t = buf_timestamp();
3821
3822 bp_time = t - bp->b_timestamp;
3823 meta_time = t - meta_bp->b_timestamp;
3824
3825 if (!(bp_time < 0) && !(meta_time < 0)) {
3826 /* time not set backwards */
3827 int bp_is_stale;
3828 bp_is_stale = (*queue == BQ_LRU) ?
3829 lru_is_stale : age_is_stale;
3830
3831 if ((meta_time >= meta_is_stale) &&
3832 (bp_time < bp_is_stale)) {
3833 bp = meta_bp;
3834 *queue = BQ_META;
3835 }
3836 }
3837 }
3838 found:
3839 if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) {
3840 panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags);
3841 }
3842
3843 /* Clean it */
3844 if (bcleanbuf(bp, FALSE)) {
3845 /*
3846 * moved to the laundry thread, buffer not ready
3847 */
3848 *queue = req;
3849 goto start;
3850 }
3851 return bp;
3852 }
3853
3854
3855 /*
3856 * Clean a buffer.
3857 * Returns 0 if buffer is ready to use,
3858 * Returns 1 if issued a buf_bawrite() to indicate
3859 * that the buffer is not ready.
3860 *
3861 * buf_mtxp is held upon entry
3862 * returns with buf_mtxp locked
3863 */
3864 int
3865 bcleanbuf(buf_t bp, boolean_t discard)
3866 {
3867 /* Remove from the queue */
3868 bremfree_locked(bp);
3869
3870 #ifdef JOE_DEBUG
3871 bp->b_owner = current_thread();
3872 bp->b_tag = 2;
3873 #endif
3874 /*
3875 * If buffer was a delayed write, start the IO by queuing
3876 * it on the LAUNDRY queue, and return 1
3877 */
3878 if (ISSET(bp->b_flags, B_DELWRI)) {
3879 if (discard) {
3880 SET(bp->b_lflags, BL_WANTDEALLOC);
3881 }
3882
3883 bmovelaundry(bp);
3884
3885 lck_mtx_unlock(buf_mtxp);
3886
3887 wakeup(&bufqueues[BQ_LAUNDRY]);
3888 /*
3889 * and give it a chance to run
3890 */
3891 (void)thread_block(THREAD_CONTINUE_NULL);
3892
3893 lck_mtx_lock_spin(buf_mtxp);
3894
3895 return 1;
3896 }
3897 #ifdef JOE_DEBUG
3898 bp->b_owner = current_thread();
3899 bp->b_tag = 8;
3900 #endif
3901 /*
3902 * Buffer is no longer on any free list... we own it
3903 */
3904 SET(bp->b_lflags, BL_BUSY);
3905 buf_busycount++;
3906
3907 bremhash(bp);
3908
3909 /*
3910 * disassociate us from our vnode, if we had one...
3911 */
3912 if (bp->b_vp) {
3913 brelvp_locked(bp);
3914 }
3915
3916 lck_mtx_unlock(buf_mtxp);
3917
3918 BLISTNONE(bp);
3919
3920 if (ISSET(bp->b_flags, B_META)) {
3921 buf_free_meta_store(bp);
3922 }
3923
3924 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
3925
3926 buf_release_credentials(bp);
3927
3928 /* If discarding, just move to the empty queue */
3929 if (discard) {
3930 lck_mtx_lock_spin(buf_mtxp);
3931 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
3932 bp->b_whichq = BQ_EMPTY;
3933 binshash(bp, &invalhash);
3934 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3935 CLR(bp->b_lflags, BL_BUSY);
3936 buf_busycount--;
3937 } else {
3938 /* Not discarding: clean up and prepare for reuse */
3939 bp->b_bufsize = 0;
3940 bp->b_datap = (uintptr_t)NULL;
3941 bp->b_upl = (void *)NULL;
3942 bp->b_fsprivate = (void *)NULL;
3943 /*
3944 * preserve the state of whether this buffer
3945 * was allocated on the fly or not...
3946 * the only other flag that should be set at
3947 * this point is BL_BUSY...
3948 */
3949 #ifdef JOE_DEBUG
3950 bp->b_owner = current_thread();
3951 bp->b_tag = 3;
3952 #endif
3953 bp->b_lflags = BL_BUSY;
3954 bp->b_flags = (bp->b_flags & B_HDRALLOC);
3955 bp->b_redundancy_flags = 0;
3956 bp->b_dev = NODEV;
3957 bp->b_blkno = bp->b_lblkno = 0;
3958 bp->b_iodone = NULL;
3959 bp->b_error = 0;
3960 bp->b_resid = 0;
3961 bp->b_bcount = 0;
3962 bp->b_dirtyoff = bp->b_dirtyend = 0;
3963 bp->b_validoff = bp->b_validend = 0;
3964 bzero(&bp->b_attr, sizeof(struct bufattr));
3965
3966 lck_mtx_lock_spin(buf_mtxp);
3967 }
3968 return 0;
3969 }
3970
3971
3972
3973 errno_t
3974 buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
3975 {
3976 buf_t bp;
3977 errno_t error;
3978 struct bufhashhdr *dp;
3979
3980 dp = BUFHASH(vp, lblkno);
3981
3982 relook:
3983 lck_mtx_lock_spin(buf_mtxp);
3984
3985 if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
3986 lck_mtx_unlock(buf_mtxp);
3987 return 0;
3988 }
3989 if (ISSET(bp->b_lflags, BL_BUSY)) {
3990 if (!ISSET(flags, BUF_WAIT)) {
3991 lck_mtx_unlock(buf_mtxp);
3992 return EBUSY;
3993 }
3994 SET(bp->b_lflags, BL_WANTED);
3995
3996 error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
3997
3998 if (error) {
3999 return error;
4000 }
4001 goto relook;
4002 }
4003 bremfree_locked(bp);
4004 SET(bp->b_lflags, BL_BUSY);
4005 SET(bp->b_flags, B_INVAL);
4006 buf_busycount++;
4007 #ifdef JOE_DEBUG
4008 bp->b_owner = current_thread();
4009 bp->b_tag = 4;
4010 #endif
4011 lck_mtx_unlock(buf_mtxp);
4012 buf_brelse(bp);
4013
4014 return 0;
4015 }
4016
4017
4018 void
4019 buf_drop(buf_t bp)
4020 {
4021 int need_wakeup = 0;
4022
4023 lck_mtx_lock_spin(buf_mtxp);
4024
4025 if (ISSET(bp->b_lflags, BL_WANTED)) {
4026 /*
4027 * delay the actual wakeup until after we
4028 * clear BL_BUSY and we've dropped buf_mtxp
4029 */
4030 need_wakeup = 1;
4031 }
4032 #ifdef JOE_DEBUG
4033 bp->b_owner = current_thread();
4034 bp->b_tag = 9;
4035 #endif
4036 /*
4037 * Unlock the buffer.
4038 */
4039 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
4040 buf_busycount--;
4041
4042 lck_mtx_unlock(buf_mtxp);
4043
4044 if (need_wakeup) {
4045 /*
4046 * Wake up any processes waiting for _this_ buffer to become free.
4047 */
4048 wakeup(bp);
4049 }
4050 }
4051
4052
4053 errno_t
4054 buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo)
4055 {
4056 errno_t error;
4057
4058 lck_mtx_lock_spin(buf_mtxp);
4059
4060 error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
4061
4062 lck_mtx_unlock(buf_mtxp);
4063
4064 return error;
4065 }
4066
4067
4068 static errno_t
4069 buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
4070 {
4071 errno_t error;
4072 struct timespec ts;
4073
4074 if (ISSET(bp->b_flags, B_LOCKED)) {
4075 if ((flags & BAC_SKIP_LOCKED)) {
4076 return EDEADLK;
4077 }
4078 } else {
4079 if ((flags & BAC_SKIP_NONLOCKED)) {
4080 return EDEADLK;
4081 }
4082 }
4083 if (ISSET(bp->b_lflags, BL_BUSY)) {
4084 /*
4085 * since the lck_mtx_lock may block, the buffer
4086 * may become BUSY, so we need to
4087 * recheck for a NOWAIT request
4088 */
4089 if (flags & BAC_NOWAIT) {
4090 return EBUSY;
4091 }
4092 SET(bp->b_lflags, BL_WANTED);
4093
4094 /* the hz value is 100; which leads to 10ms */
4095 ts.tv_sec = (slptimeo / 100);
4096 ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
4097 error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
4098
4099 if (error) {
4100 return error;
4101 }
4102 return EAGAIN;
4103 }
4104 if (flags & BAC_REMOVE) {
4105 bremfree_locked(bp);
4106 }
4107 SET(bp->b_lflags, BL_BUSY);
4108 buf_busycount++;
4109
4110 #ifdef JOE_DEBUG
4111 bp->b_owner = current_thread();
4112 bp->b_tag = 5;
4113 #endif
4114 return 0;
4115 }
4116
4117
4118 /*
4119 * Wait for operations on the buffer to complete.
4120 * When they do, extract and return the I/O's error value.
4121 */
4122 errno_t
4123 buf_biowait(buf_t bp)
4124 {
4125 while (!ISSET(bp->b_flags, B_DONE)) {
4126 lck_mtx_lock_spin(buf_mtxp);
4127
4128 if (!ISSET(bp->b_flags, B_DONE)) {
4129 DTRACE_IO1(wait__start, buf_t, bp);
4130 (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_biowait", NULL);
4131 DTRACE_IO1(wait__done, buf_t, bp);
4132 } else {
4133 lck_mtx_unlock(buf_mtxp);
4134 }
4135 }
4136 /* check for interruption of I/O (e.g. via NFS), then errors. */
4137 if (ISSET(bp->b_flags, B_EINTR)) {
4138 CLR(bp->b_flags, B_EINTR);
4139 return EINTR;
4140 } else if (ISSET(bp->b_flags, B_ERROR)) {
4141 return bp->b_error ? bp->b_error : EIO;
4142 } else {
4143 return 0;
4144 }
4145 }
4146
4147
4148 /*
4149 * Mark I/O complete on a buffer.
4150 *
4151 * If a callback has been requested, e.g. the pageout
4152 * daemon, do so. Otherwise, awaken waiting processes.
4153 *
4154 * [ Leffler, et al., says on p.247:
4155 * "This routine wakes up the blocked process, frees the buffer
4156 * for an asynchronous write, or, for a request by the pagedaemon
4157 * process, invokes a procedure specified in the buffer structure" ]
4158 *
4159 * In real life, the pagedaemon (or other system processes) wants
4160 * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
4161 * (for swap pager, that puts swap buffers on the free lists (!!!),
4162 * for the vn device, that puts malloc'd buffers on the free lists!)
4163 */
4164
4165 void
4166 buf_biodone(buf_t bp)
4167 {
4168 mount_t mp;
4169 struct bufattr *bap;
4170 struct timeval real_elapsed;
4171 uint64_t real_elapsed_usec = 0;
4172
4173 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
4174 bp, bp->b_datap, bp->b_flags, 0, 0);
4175
4176 if (ISSET(bp->b_flags, B_DONE)) {
4177 panic("biodone already");
4178 }
4179
4180 bap = &bp->b_attr;
4181
4182 if (bp->b_vp && bp->b_vp->v_mount) {
4183 mp = bp->b_vp->v_mount;
4184 } else {
4185 mp = NULL;
4186 }
4187
4188 if (ISSET(bp->b_flags, B_ERROR)) {
4189 if (mp && (MNT_ROOTFS & mp->mnt_flag)) {
4190 dk_error_description_t desc;
4191 bzero(&desc, sizeof(desc));
4192 desc.description = panic_disk_error_description;
4193 desc.description_size = panic_disk_error_description_size;
4194 VNOP_IOCTL(mp->mnt_devvp, DKIOCGETERRORDESCRIPTION, (caddr_t)&desc, 0, vfs_context_kernel());
4195 }
4196 }
4197
4198 if (mp && (bp->b_flags & B_READ) == 0) {
4199 update_last_io_time(mp);
4200 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
4201 } else if (mp) {
4202 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size);
4203 }
4204
4205 throttle_info_end_io(bp);
4206
4207 if (kdebug_enable) {
4208 int code = DKIO_DONE;
4209 int io_tier = GET_BUFATTR_IO_TIER(bap);
4210
4211 if (bp->b_flags & B_READ) {
4212 code |= DKIO_READ;
4213 }
4214 if (bp->b_flags & B_ASYNC) {
4215 code |= DKIO_ASYNC;
4216 }
4217
4218 if (bp->b_flags & B_META) {
4219 code |= DKIO_META;
4220 } else if (bp->b_flags & B_PAGEIO) {
4221 code |= DKIO_PAGING;
4222 }
4223
4224 if (io_tier != 0) {
4225 code |= DKIO_THROTTLE;
4226 }
4227
4228 code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);
4229
4230 if (bp->b_flags & B_PASSIVE) {
4231 code |= DKIO_PASSIVE;
4232 }
4233
4234 if (bap->ba_flags & BA_NOCACHE) {
4235 code |= DKIO_NOCACHE;
4236 }
4237
4238 if (bap->ba_flags & BA_IO_TIER_UPGRADE) {
4239 code |= DKIO_TIER_UPGRADE;
4240 }
4241
4242 KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code),
4243 buf_kernel_addrperm_addr(bp),
4244 (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid,
4245 bp->b_error);
4246 }
4247
4248 microuptime(&real_elapsed);
4249 timevalsub(&real_elapsed, &bp->b_timestamp_tv);
4250 real_elapsed_usec = real_elapsed.tv_sec * USEC_PER_SEC + real_elapsed.tv_usec;
4251 disk_conditioner_delay(bp, 1, bp->b_bcount, real_elapsed_usec);
4252
4253 /*
4254 * I/O was done, so don't believe
4255 * the DIRTY state from VM anymore...
4256 * and we need to reset the THROTTLED/PASSIVE
4257 * indicators
4258 */
4259 CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE));
4260 CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP | BA_IO_TIER_UPGRADE));
4261
4262 SET_BUFATTR_IO_TIER(bap, 0);
4263
4264 DTRACE_IO1(done, buf_t, bp);
4265
4266 if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) {
4267 /*
4268 * wake up any writers blocked
4269 * on throttle or waiting for I/O
4270 * to drain
4271 */
4272 vnode_writedone(bp->b_vp);
4273 }
4274
4275 if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
4276 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
4277 void *arg = bp->b_transaction;
4278 int callout = ISSET(bp->b_flags, B_CALL);
4279
4280 if (iodone_func == NULL) {
4281 panic("biodone: bp @ %p has NULL b_iodone!\n", bp);
4282 }
4283
4284 CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
4285 bp->b_iodone = NULL;
4286 bp->b_transaction = NULL;
4287
4288 if (callout) {
4289 SET(bp->b_flags, B_DONE); /* note that it's done */
4290 }
4291 (*iodone_func)(bp, arg);
4292
4293 if (callout) {
4294 /*
4295 * assumes that the callback function takes
4296 * ownership of the bp and deals with releasing it if necessary
4297 */
4298 goto biodone_done;
4299 }
4300 /*
4301 * in this case the call back function is acting
4302 * strictly as a filter... it does not take
4303 * ownership of the bp and is expecting us
4304 * to finish cleaning up... this is currently used
4305 * by the HFS journaling code
4306 */
4307 }
4308 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
4309 SET(bp->b_flags, B_DONE); /* note that it's done */
4310
4311 buf_brelse(bp);
4312 } else { /* or just wakeup the buffer */
4313 /*
4314 * by taking the mutex, we serialize
4315 * the buf owner calling buf_biowait so that we'll
4316 * only see him in one of 2 states...
4317 * state 1: B_DONE wasn't set and he's
4318 * blocked in msleep
4319 * state 2: he's blocked trying to take the
4320 * mutex before looking at B_DONE
4321 * BL_WANTED is cleared in case anyone else
4322 * is blocked waiting for the buffer... note
4323 * that we haven't cleared B_BUSY yet, so if
4324 * they do get to run, they're going to re-set
4325 * BL_WANTED and go back to sleep
4326 */
4327 lck_mtx_lock_spin(buf_mtxp);
4328
4329 CLR(bp->b_lflags, BL_WANTED);
4330 SET(bp->b_flags, B_DONE); /* note that it's done */
4331
4332 lck_mtx_unlock(buf_mtxp);
4333
4334 wakeup(bp);
4335 }
4336 biodone_done:
4337 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
4338 (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
4339 }
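
/*
 * Illustrative sketch of the B_CALL completion path handled above: a caller
 * that does not want to block in buf_biowait() installs a completion
 * callback (buf_setcallback() is the exported KPI that sets b_iodone and
 * B_CALL); buf_biodone() then invokes it, and the callback owns the bp and
 * must release it.  Placeholder names throughout.
 *
 *	static void
 *	my_iodone(buf_t bp, void *arg)
 *	{
 *		// called from buf_biodone(); we own bp now
 *		buf_brelse(bp);
 *	}
 *	...
 *	buf_setcallback(bp, my_iodone, my_transaction);
 *	VNOP_STRATEGY(bp);		// completion is reported via my_iodone()
 */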
4340
4341 /*
4342 * Obfuscate buf pointers.
4343 */
4344 vm_offset_t
4345 buf_kernel_addrperm_addr(void * addr)
4346 {
4347 if ((vm_offset_t)addr == 0) {
4348 return 0;
4349 } else {
4350 return (vm_offset_t)addr + buf_kernel_addrperm;
4351 }
4352 }
4353
4354 /*
4355 * Return a count of buffers on the "locked" queue.
4356 */
4357 int
4358 count_lock_queue(void)
4359 {
4360 buf_t bp;
4361 int n = 0;
4362
4363 lck_mtx_lock_spin(buf_mtxp);
4364
4365 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
4366 bp = bp->b_freelist.tqe_next) {
4367 n++;
4368 }
4369 lck_mtx_unlock(buf_mtxp);
4370
4371 return n;
4372 }
4373
4374 /*
4375 * Return a count of 'busy' buffers. Used at the time of shutdown.
4376 * note: This is also called from the mach side in debug context in kdp.c
4377 */
4378 int
4379 count_busy_buffers(void)
4380 {
4381 return buf_busycount + bufstats.bufs_iobufinuse;
4382 }
4383
4384 #if DIAGNOSTIC
4385 /*
4386 * Print out statistics on the current allocation of the buffer pool.
4387 * Can be enabled to print out on every ``sync'' by setting "syncprt"
4388 * in vfs_syscalls.c using sysctl.
4389 */
4390 void
4391 vfs_bufstats()
4392 {
4393 int i, j, count;
4394 struct buf *bp;
4395 struct bqueues *dp;
4396 int counts[MAXBSIZE / CLBYTES + 1];
4397 static char *bname[BQUEUES] =
4398 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
4399
4400 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
4401 count = 0;
4402 for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
4403 counts[j] = 0;
4404 }
4405
4406 lck_mtx_lock(buf_mtxp);
4407
4408 for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
4409 counts[bp->b_bufsize / CLBYTES]++;
4410 count++;
4411 }
4412 lck_mtx_unlock(buf_mtxp);
4413
4414 printf("%s: total-%d", bname[i], count);
4415 for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
4416 if (counts[j] != 0) {
4417 printf(", %d-%d", j * CLBYTES, counts[j]);
4418 }
4419 }
4420 printf("\n");
4421 }
4422 }
4423 #endif /* DIAGNOSTIC */
4424
4425 #define NRESERVEDIOBUFS 128
4426
4427 #define MNT_VIRTUALDEV_MAX_IOBUFS 16
4428 #define VIRTUALDEV_MAX_IOBUFS ((40*niobuf_headers)/100)
4429
4430 buf_t
4431 alloc_io_buf(vnode_t vp, int priv)
4432 {
4433 buf_t bp;
4434 mount_t mp = NULL;
4435 int alloc_for_virtualdev = FALSE;
4436
4437 lck_mtx_lock_spin(iobuffer_mtxp);
4438
4439 /*
4440 * We subject iobuf requests for diskimages to additional restrictions.
4441 *
4442 * a) A single diskimage mount cannot use up more than
4443 * MNT_VIRTUALDEV_MAX_IOBUFS. However, vm privileged (pageout) requests
4444 * are not subject to this restriction.
4445 * b) iobuf headers used by all diskimage mounts combined cannot
4446 * exceed VIRTUALDEV_MAX_IOBUFS.
4447 */
4448 if (vp && ((mp = vp->v_mount)) && mp != dead_mountp &&
4449 mp->mnt_kern_flag & MNTK_VIRTUALDEV) {
4450 alloc_for_virtualdev = TRUE;
4451 while ((!priv && mp->mnt_iobufinuse > MNT_VIRTUALDEV_MAX_IOBUFS) ||
4452 bufstats.bufs_iobufinuse_vdev > VIRTUALDEV_MAX_IOBUFS) {
4453 bufstats.bufs_iobufsleeps++;
4454
4455 need_iobuffer = 1;
4456 (void)msleep(&need_iobuffer, iobuffer_mtxp,
4457 PSPIN | (PRIBIO + 1), (const char *)"alloc_io_buf (1)",
4458 NULL);
4459 }
4460 }
4461
4462 while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) ||
4463 (bp = iobufqueue.tqh_first) == NULL) {
4464 bufstats.bufs_iobufsleeps++;
4465
4466 need_iobuffer = 1;
4467 (void)msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO + 1),
4468 (const char *)"alloc_io_buf (2)", NULL);
4469 }
4470 TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
4471
4472 bufstats.bufs_iobufinuse++;
4473 if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) {
4474 bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
4475 }
4476
4477 if (alloc_for_virtualdev) {
4478 mp->mnt_iobufinuse++;
4479 bufstats.bufs_iobufinuse_vdev++;
4480 }
4481
4482 lck_mtx_unlock(iobuffer_mtxp);
4483
4484 /*
4485 * initialize various fields
4486 * we don't need to hold the mutex since the buffer
4487 * is now private... the vp should have a reference
4488 * on it and is not protected by this mutex in any event
4489 */
4490 bp->b_timestamp = 0;
4491 bp->b_proc = NULL;
4492
4493 bp->b_datap = 0;
4494 bp->b_flags = 0;
4495 bp->b_lflags = BL_BUSY | BL_IOBUF;
4496 if (alloc_for_virtualdev) {
4497 bp->b_lflags |= BL_IOBUF_VDEV;
4498 }
4499 bp->b_redundancy_flags = 0;
4500 bp->b_blkno = bp->b_lblkno = 0;
4501 #ifdef JOE_DEBUG
4502 bp->b_owner = current_thread();
4503 bp->b_tag = 6;
4504 #endif
4505 bp->b_iodone = NULL;
4506 bp->b_error = 0;
4507 bp->b_resid = 0;
4508 bp->b_bcount = 0;
4509 bp->b_bufsize = 0;
4510 bp->b_upl = NULL;
4511 bp->b_fsprivate = (void *)NULL;
4512 bp->b_vp = vp;
4513 bzero(&bp->b_attr, sizeof(struct bufattr));
4514
4515 if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) {
4516 bp->b_dev = vp->v_rdev;
4517 } else {
4518 bp->b_dev = NODEV;
4519 }
4520
4521 return bp;
4522 }
4523
4524
4525 void
4526 free_io_buf(buf_t bp)
4527 {
4528 int need_wakeup = 0;
4529 int free_for_virtualdev = FALSE;
4530 mount_t mp = NULL;
4531
4532 /* Was this iobuf for a diskimage? */
4533 if (bp->b_lflags & BL_IOBUF_VDEV) {
4534 free_for_virtualdev = TRUE;
4535 if (bp->b_vp) {
4536 mp = bp->b_vp->v_mount;
4537 }
4538 }
4539
4540 /*
4541 * put buffer back on the head of the iobufqueue
4542 */
4543 bp->b_vp = NULL;
4544 bp->b_flags = B_INVAL;
4545
4546 /* Zero out the bufattr and its flags before relinquishing this iobuf */
4547 bzero(&bp->b_attr, sizeof(struct bufattr));
4548
4549 lck_mtx_lock_spin(iobuffer_mtxp);
4550
4551 binsheadfree(bp, &iobufqueue, -1);
4552
4553 if (need_iobuffer) {
4554 /*
4555 * Wake up any processes waiting because they need an io buffer
4556 *
4557 * do the wakeup after we drop the mutex... it's possible that the
4558 * wakeup will be superfluous if need_iobuffer gets set again and
4559 * another thread runs this path, but it's highly unlikely, doesn't
4560 * hurt, and it means we don't hold up I/O progress if the wakeup blocks
4561 * trying to grab a task related lock...
4562 */
4563 need_iobuffer = 0;
4564 need_wakeup = 1;
4565 }
4566 if (bufstats.bufs_iobufinuse <= 0) {
4567 panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp);
4568 }
4569
4570 bufstats.bufs_iobufinuse--;
4571
4572 if (free_for_virtualdev) {
4573 bufstats.bufs_iobufinuse_vdev--;
4574 if (mp && mp != dead_mountp) {
4575 mp->mnt_iobufinuse--;
4576 }
4577 }
4578
4579 lck_mtx_unlock(iobuffer_mtxp);
4580
4581 if (need_wakeup) {
4582 wakeup(&need_iobuffer);
4583 }
4584 }
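/*
 * Illustrative alloc_io_buf()/free_io_buf() pairing (hypothetical helper,
 * not from this file).  priv should be nonzero only for vm privileged
 * (pageout) callers that may dip into the NRESERVEDIOBUFS reserve; an
 * ordinary caller passes 0, as here.  example_submit_io() stands in for
 * whatever strategy routine the caller actually uses.
 */
#if 0	/* illustration only -- not compiled */
static int
example_read_block(vnode_t vp, daddr64_t blkno, caddr_t data, int size)
{
	buf_t	bp;
	int	error;

	bp = alloc_io_buf(vp, 0);	/* may sleep until a header is free */

	bp->b_blkno = bp->b_lblkno = blkno;
	bp->b_bcount = size;
	bp->b_datap = (uintptr_t)data;
	SET(bp->b_flags, B_READ);

	example_submit_io(bp);		/* hypothetical: issue the I/O */
	error = buf_biowait(bp);	/* wait for buf_biodone() */

	free_io_buf(bp);		/* put the header back on iobufqueue */
	return error;
}
#endif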
4585
4586
4587 void
4588 buf_list_lock(void)
4589 {
4590 lck_mtx_lock_spin(buf_mtxp);
4591 }
4592
4593 void
4594 buf_list_unlock(void)
4595 {
4596 lck_mtx_unlock(buf_mtxp);
4597 }
4598
4599 /*
4600 * If getnewbuf() calls bcleanbuf() on the same thread
4601 * there is a potential for stack overrun and deadlocks.
4602 * So we always hand off the work to a worker thread for completion.
4603 */
4604
4605
4606 static void
4607 bcleanbuf_thread_init(void)
4608 {
4609 thread_t thread = THREAD_NULL;
4610
4611 /* create worker thread */
4612 kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread);
4613 thread_deallocate(thread);
4614 }
4615
4616 typedef int (*bcleanbufcontinuation)(int);
4617
4618 __attribute__((noreturn))
4619 static void
4620 bcleanbuf_thread(void)
4621 {
4622 struct buf *bp;
4623 int error = 0;
4624 int loopcnt = 0;
4625
4626 for (;;) {
4627 lck_mtx_lock_spin(buf_mtxp);
4628
4629 while ((bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
4630 (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO | PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
4631 }
4632
4633 /*
4634 * Remove from the queue
4635 */
4636 bremfree_locked(bp);
4637
4638 /*
4639 * Buffer is no longer on any free list
4640 */
4641 SET(bp->b_lflags, BL_BUSY);
4642 buf_busycount++;
4643
4644 #ifdef JOE_DEBUG
4645 bp->b_owner = current_thread();
4646 bp->b_tag = 10;
4647 #endif
4648
4649 lck_mtx_unlock(buf_mtxp);
4650 /*
4651 * do the IO
4652 */
4653 error = bawrite_internal(bp, 0);
4654
4655 if (error) {
4656 bp->b_whichq = BQ_LAUNDRY;
4657 bp->b_timestamp = buf_timestamp();
4658
4659 lck_mtx_lock_spin(buf_mtxp);
4660
4661 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
4662 blaundrycnt++;
4663
4664 /* we never leave a busy buffer on the laundry queue */
4665 CLR(bp->b_lflags, BL_BUSY);
4666 buf_busycount--;
4667 #ifdef JOE_DEBUG
4668 bp->b_owner = current_thread();
4669 bp->b_tag = 11;
4670 #endif
4671
4672 lck_mtx_unlock(buf_mtxp);
4673
4674 if (loopcnt > MAXLAUNDRY) {
4675 /*
4676 * bawrite_internal() can return errors if we're throttled. If we've
4677 * done several I/Os and failed, give the system some time to unthrottle
4678 * the vnode
4679 */
4680 (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1);
4681 loopcnt = 0;
4682 } else {
4683 /* give other threads a chance to run */
4684 (void)thread_block(THREAD_CONTINUE_NULL);
4685 loopcnt++;
4686 }
4687 }
4688 }
4689 }
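/*
 * Illustrative producer side of the laundry handoff described above
 * (simplified; the real enqueueing happens in bcleanbuf()/getnewbuf(),
 * defined earlier in this file): a dirty buffer is placed on BQ_LAUNDRY
 * and the worker thread is woken instead of being written synchronously.
 */
#if 0	/* illustration only -- not compiled */
	/* caller holds buf_mtxp and has already done bremfree_locked(bp) */
	bp->b_whichq = BQ_LAUNDRY;
	bp->b_timestamp = buf_timestamp();
	binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
	blaundrycnt++;

	lck_mtx_unlock(buf_mtxp);

	wakeup(&bufqueues[BQ_LAUNDRY]);	/* wakes bcleanbuf_thread()'s msleep0 */
#endif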
4690
4691
4692 static int
4693 brecover_data(buf_t bp)
4694 {
4695 int upl_offset;
4696 upl_t upl;
4697 upl_page_info_t *pl;
4698 kern_return_t kret;
4699 vnode_t vp = bp->b_vp;
4700 int upl_flags;
4701
4702
4703 if (!UBCINFOEXISTS(vp) || bp->b_bufsize == 0) {
4704 goto dump_buffer;
4705 }
4706
4707 upl_flags = UPL_PRECIOUS;
4708 if (!(buf_flags(bp) & B_READ)) {
4709 /*
4710 * "write" operation: let the UPL subsystem know
4711 * that we intend to modify the buffer cache pages we're
4712 * gathering.
4713 */
4714 upl_flags |= UPL_WILL_MODIFY;
4715 }
4716
4717 kret = ubc_create_upl_kernel(vp,
4718 ubc_blktooff(vp, bp->b_lblkno),
4719 bp->b_bufsize,
4720 &upl,
4721 &pl,
4722 upl_flags,
4723 VM_KERN_MEMORY_FILE);
4724 if (kret != KERN_SUCCESS) {
4725 panic("Failed to create UPL");
4726 }
4727
4728 for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
4729 if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
4730 ubc_upl_abort(upl, 0);
4731 goto dump_buffer;
4732 }
4733 }
4734 bp->b_upl = upl;
4735
4736 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
4737
4738 if (kret != KERN_SUCCESS) {
4739 panic("getblk: ubc_upl_map() failed with (%d)", kret);
4740 }
4741 return 1;
4742
4743 dump_buffer:
4744 bp->b_bufsize = 0;
4745 SET(bp->b_flags, B_INVAL);
4746 buf_brelse(bp);
4747
4748 return 0;
4749 }
4750
4751 int
4752 fs_buffer_cache_gc_register(void (* callout)(int, void *), void *context)
4753 {
4754 lck_mtx_lock(buf_gc_callout);
4755 for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4756 if (fs_callouts[i].callout == NULL) {
4757 fs_callouts[i].callout = callout;
4758 fs_callouts[i].context = context;
4759 lck_mtx_unlock(buf_gc_callout);
4760 return 0;
4761 }
4762 }
4763
4764 lck_mtx_unlock(buf_gc_callout);
4765 return ENOMEM;
4766 }
4767
4768 int
4769 fs_buffer_cache_gc_unregister(void (* callout)(int, void *), void *context)
4770 {
4771 lck_mtx_lock(buf_gc_callout);
4772 for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4773 if (fs_callouts[i].callout == callout &&
4774 fs_callouts[i].context == context) {
4775 fs_callouts[i].callout = NULL;
4776 fs_callouts[i].context = NULL;
4777 }
4778 }
4779 lck_mtx_unlock(buf_gc_callout);
4780 return 0;
4781 }
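/*
 * Hypothetical filesystem-side usage of the GC callout registration API
 * above (the my_fs_* names are made up for illustration).  The callout is
 * invoked from buffer_cache_gc() via fs_buffer_cache_gc_dispatch_callouts();
 * "all" is set when everything reclaimable should be released.
 */
#if 0	/* illustration only -- not compiled */
static void
my_fs_gc_callout(int all, void *context)
{
	struct my_fs_mount *mntp = context;

	/* trim this filesystem's private caches; be aggressive when "all" is set */
	my_fs_trim_caches(mntp, all);
}

/* in the hypothetical fs mount path */
	(void) fs_buffer_cache_gc_register(my_fs_gc_callout, mntp);

/* in the hypothetical fs unmount path */
	(void) fs_buffer_cache_gc_unregister(my_fs_gc_callout, mntp);
#endif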
4782
4783 static void
4784 fs_buffer_cache_gc_dispatch_callouts(int all)
4785 {
4786 lck_mtx_lock(buf_gc_callout);
4787 for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4788 if (fs_callouts[i].callout != NULL) {
4789 fs_callouts[i].callout(all, fs_callouts[i].context);
4790 }
4791 }
4792 lck_mtx_unlock(buf_gc_callout);
4793 }
4794
4795 static boolean_t
4796 buffer_cache_gc(int all)
4797 {
4798 buf_t bp;
4799 boolean_t did_large_zfree = FALSE;
4800 boolean_t need_wakeup = FALSE;
4801 int now = buf_timestamp();
4802 uint32_t found = 0;
4803 struct bqueues privq;
4804 int thresh_hold = BUF_STALE_THRESHHOLD;
4805
4806 if (all) {
4807 thresh_hold = 0;
4808 }
4809 /*
4810 * We only care about metadata (incore storage comes from zalloc()).
4811 * Unless "all" is set (used to evict metadata buffers in preparation
4812 * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers
4813 * that have not been accessed in the last BUF_STALE_THRESHHOLD seconds.
4814 * BUF_MAX_GC_BATCH_SIZE bounds both the hold time of the global lock
4815 * "buf_mtxp" and the length of time we spend compute bound in the GC
4816 * thread that calls this function.
4817 */
4818 lck_mtx_lock(buf_mtxp);
4819
4820 do {
4821 found = 0;
4822 TAILQ_INIT(&privq);
4823 need_wakeup = FALSE;
4824
4825 while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
4826 (now > bp->b_timestamp) &&
4827 (now - bp->b_timestamp > thresh_hold) &&
4828 (found < BUF_MAX_GC_BATCH_SIZE)) {
4829 /* Remove from free list */
4830 bremfree_locked(bp);
4831 found++;
4832
4833 #ifdef JOE_DEBUG
4834 bp->b_owner = current_thread();
4835 bp->b_tag = 12;
4836 #endif
4837
4838 /* If dirty, move to laundry queue and remember to do wakeup */
4839 if (ISSET(bp->b_flags, B_DELWRI)) {
4840 SET(bp->b_lflags, BL_WANTDEALLOC);
4841
4842 bmovelaundry(bp);
4843 need_wakeup = TRUE;
4844
4845 continue;
4846 }
4847
4848 /*
4849 * Mark busy and put on private list. We could technically get
4850 * away without setting BL_BUSY here.
4851 */
4852 SET(bp->b_lflags, BL_BUSY);
4853 buf_busycount++;
4854
4855 /*
4856 * Remove from hash and dissociate from vp.
4857 */
4858 bremhash(bp);
4859 if (bp->b_vp) {
4860 brelvp_locked(bp);
4861 }
4862
4863 TAILQ_INSERT_TAIL(&privq, bp, b_freelist);
4864 }
4865
4866 if (found == 0) {
4867 break;
4868 }
4869
4870 /* Drop lock for batch processing */
4871 lck_mtx_unlock(buf_mtxp);
4872
4873 /* Wakeup and yield for laundry if need be */
4874 if (need_wakeup) {
4875 wakeup(&bufqueues[BQ_LAUNDRY]);
4876 (void)thread_block(THREAD_CONTINUE_NULL);
4877 }
4878
4879 /* Clean up every buffer on private list */
4880 TAILQ_FOREACH(bp, &privq, b_freelist) {
4881 /* Take note if we've definitely freed at least a page to a zone */
4882 if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
4883 did_large_zfree = TRUE;
4884 }
4885
4886 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
4887
4888 /* Free Storage */
4889 buf_free_meta_store(bp);
4890
4891 /* Release credentials */
4892 buf_release_credentials(bp);
4893
4894 /* Prepare for moving to empty queue */
4895 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
4896 | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
4897 bp->b_whichq = BQ_EMPTY;
4898 BLISTNONE(bp);
4899 }
4900 lck_mtx_lock(buf_mtxp);
4901
4902 /* Back under lock, move them all to invalid hash and clear busy */
4903 TAILQ_FOREACH(bp, &privq, b_freelist) {
4904 binshash(bp, &invalhash);
4905 CLR(bp->b_lflags, BL_BUSY);
4906 buf_busycount--;
4907
4908 #ifdef JOE_DEBUG
4909 if (bp->b_owner != current_thread()) {
4910 panic("Buffer stolen from buffer_cache_gc()");
4911 }
4912 bp->b_owner = current_thread();
4913 bp->b_tag = 13;
4914 #endif
4915 }
4916
4917 /* And do a big bulk move to the empty queue */
4918 TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist);
4919 } while (all && (found == BUF_MAX_GC_BATCH_SIZE));
4920
4921 lck_mtx_unlock(buf_mtxp);
4922
4923 fs_buffer_cache_gc_dispatch_callouts(all);
4924
4925 return did_large_zfree;
4926 }
4927
4928
4929 /*
4930 * disabled for now
4931 */
4932
4933 #if FLUSH_QUEUES
4934
4935 #define NFLUSH 32
4936
4937 static int
4938 bp_cmp(void *a, void *b)
4939 {
4940 buf_t *bp_a = *(buf_t **)a,
4941 *bp_b = *(buf_t **)b;
4942 daddr64_t res;
4943
4944 // don't have to worry about negative block
4945 // numbers so this is ok to do.
4946 //
4947 res = (bp_a->b_blkno - bp_b->b_blkno);
4948
4949 return (int)res;
4950 }
4951
4952
4953 int
4954 bflushq(int whichq, mount_t mp)
4955 {
4956 buf_t bp, next;
4957 int i, buf_count;
4958 int total_writes = 0;
4959 static buf_t flush_table[NFLUSH];
4960
4961 if (whichq < 0 || whichq >= BQUEUES) {
4962 return 0;
4963 }
4964
4965 restart:
4966 lck_mtx_lock(buf_mtxp);
4967
4968 bp = TAILQ_FIRST(&bufqueues[whichq]);
4969
4970 for (buf_count = 0; bp; bp = next) {
4971 next = bp->b_freelist.tqe_next;
4972
4973 if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
4974 continue;
4975 }
4976
4977 if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
4978 bremfree_locked(bp);
4979 #ifdef JOE_DEBUG
4980 bp->b_owner = current_thread();
4981 bp->b_tag = 7;
4982 #endif
4983 SET(bp->b_lflags, BL_BUSY);
4984 buf_busycount++;
4985
4986 flush_table[buf_count] = bp;
4987 buf_count++;
4988 total_writes++;
4989
4990 if (buf_count >= NFLUSH) {
4991 lck_mtx_unlock(buf_mtxp);
4992
4993 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
4994
4995 for (i = 0; i < buf_count; i++) {
4996 buf_bawrite(flush_table[i]);
4997 }
4998 goto restart;
4999 }
5000 }
5001 }
5002 lck_mtx_unlock(buf_mtxp);
5003
5004 if (buf_count > 0) {
5005 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
5006
5007 for (i = 0; i < buf_count; i++) {
5008 buf_bawrite(flush_table[i]);
5009 }
5010 }
5011
5012 return total_writes;
5013 }
5014 #endif