Lines Matching refs:bp
(cross-reference listing of the buffer pointer bp in FreeBSD's sys/kern/vfs_bio.c: each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark declarations)

169 static void vm_hold_free_pages(struct buf *bp, int newbsize);
170 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
172 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
173 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
175 static void vfs_clean_pages_dirty_buf(struct buf *bp);
176 static void vfs_setdirty_range(struct buf *bp);
177 static void vfs_vmio_invalidate(struct buf *bp);
178 static void vfs_vmio_truncate(struct buf *bp, int npages);
179 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
195 static void bq_remove(struct bufqueue *bq, struct buf *bp);
196 static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock);
579 bdirtysub(struct buf *bp) in bdirtysub() argument
584 bd = bufdomain(bp); in bdirtysub()
599 bdirtyadd(struct buf *bp) in bdirtyadd() argument
608 bd = bufdomain(bp); in bdirtyadd()
643 bufspace_adjust(struct buf *bp, int bufsize) in bufspace_adjust() argument
649 KASSERT((bp->b_flags & B_MALLOC) == 0, in bufspace_adjust()
650 ("bufspace_adjust: malloc buf %p", bp)); in bufspace_adjust()
651 bd = bufdomain(bp); in bufspace_adjust()
652 diff = bufsize - bp->b_bufsize; in bufspace_adjust()
662 bp->b_bufsize = bufsize; in bufspace_adjust()
884 bufmallocadjust(struct buf *bp, int bufsize) in bufmallocadjust() argument
888 KASSERT((bp->b_flags & B_MALLOC) != 0, in bufmallocadjust()
889 ("bufmallocadjust: non-malloc buf %p", bp)); in bufmallocadjust()
890 diff = bufsize - bp->b_bufsize; in bufmallocadjust()
895 bp->b_bufsize = bufsize; in bufmallocadjust()
922 runningbufwakeup(struct buf *bp) in runningbufwakeup() argument
926 bspace = bp->b_runningbufspace; in runningbufwakeup()
932 bp->b_runningbufspace = 0; in runningbufwakeup()
945 runningbufclaim(struct buf *bp, int space) in runningbufclaim() argument
950 bp->b_runningbufspace = space; in runningbufclaim()
986 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off, in vfs_buf_test_cache() argument
995 if (bp->b_flags & B_CACHE) { in vfs_buf_test_cache()
998 bp->b_flags &= ~B_CACHE; in vfs_buf_test_cache()
1214 struct buf *bp; in bufinit() local
1233 bp = nbufp(i); in bufinit()
1234 bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf)); in bufinit()
1235 bp->b_flags = B_INVAL; in bufinit()
1236 bp->b_rcred = NOCRED; in bufinit()
1237 bp->b_wcred = NOCRED; in bufinit()
1238 bp->b_qindex = QUEUE_NONE; in bufinit()
1239 bp->b_domain = -1; in bufinit()
1240 bp->b_subqueue = mp_maxid + 1; in bufinit()
1241 bp->b_xflags = 0; in bufinit()
1242 bp->b_data = bp->b_kvabase = unmapped_buf; in bufinit()
1243 LIST_INIT(&bp->b_dep); in bufinit()
1244 BUF_LOCKINIT(bp, buf_wmesg); in bufinit()
1245 bq_insert(&bqempty, bp, false); in bufinit()
1362 vfs_buf_check_mapped(struct buf *bp) in vfs_buf_check_mapped() argument
1365 KASSERT(bp->b_kvabase != unmapped_buf, in vfs_buf_check_mapped()
1366 ("mapped buf: b_kvabase was not updated %p", bp)); in vfs_buf_check_mapped()
1367 KASSERT(bp->b_data != unmapped_buf, in vfs_buf_check_mapped()
1368 ("mapped buf: b_data was not updated %p", bp)); in vfs_buf_check_mapped()
1369 KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf + in vfs_buf_check_mapped()
1370 maxphys, ("b_data + b_offset unmapped %p", bp)); in vfs_buf_check_mapped()
1374 vfs_buf_check_unmapped(struct buf *bp) in vfs_buf_check_unmapped() argument
1377 KASSERT(bp->b_data == unmapped_buf, in vfs_buf_check_unmapped()
1378 ("unmapped buf: corrupted b_data %p", bp)); in vfs_buf_check_unmapped()
1381 #define BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp) argument
1382 #define BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp) argument
1384 #define BUF_CHECK_MAPPED(bp) do {} while (0) argument
1385 #define BUF_CHECK_UNMAPPED(bp) do {} while (0) argument
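The two pairs of BUF_CHECK_* definitions above are the debug and production forms of the same macros; the guard between them is not part of the matched lines, but the usual vfs_bio.c pattern is the INVARIANTS conditional sketched here (a reconstruction, not a quote):

    #ifdef INVARIANTS
    #define BUF_CHECK_MAPPED(bp)    vfs_buf_check_mapped(bp)
    #define BUF_CHECK_UNMAPPED(bp)  vfs_buf_check_unmapped(bp)
    #else
    #define BUF_CHECK_MAPPED(bp)    do {} while (0)
    #define BUF_CHECK_UNMAPPED(bp)  do {} while (0)
    #endif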
1389 isbufbusy(struct buf *bp) in isbufbusy() argument
1391 if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) || in isbufbusy()
1392 ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)) in isbufbusy()
1404 struct buf *bp; in bufshutdown() local
1424 bp = nbufp(i); in bufshutdown()
1425 if (isbufbusy(bp)) in bufshutdown()
1468 bp = nbufp(i); in bufshutdown()
1469 if (isbufbusy(bp)) { in bufshutdown()
1472 if (bp->b_dev == NULL) { in bufshutdown()
1474 bp->b_vp->v_mount, mnt_list); in bufshutdown()
1482 nbusy, bp, bp->b_vp, bp->b_flags, in bufshutdown()
1483 (intmax_t)bp->b_blkno, in bufshutdown()
1484 (intmax_t)bp->b_lblkno); in bufshutdown()
1485 BUF_LOCKPRINTINFO(bp); in bufshutdown()
1487 vn_printf(bp->b_vp, in bufshutdown()
1528 bpmap_qenter(struct buf *bp) in bpmap_qenter() argument
1531 BUF_CHECK_MAPPED(bp); in bpmap_qenter()
1537 bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data); in bpmap_qenter()
1538 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages); in bpmap_qenter()
1539 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | in bpmap_qenter()
1540 (vm_offset_t)(bp->b_offset & PAGE_MASK)); in bpmap_qenter()
1544 bufdomain(struct buf *bp) in bufdomain() argument
1547 return (&bdomain[bp->b_domain]); in bufdomain()
1551 bufqueue(struct buf *bp) in bufqueue() argument
1554 switch (bp->b_qindex) { in bufqueue()
1562 return (&bufdomain(bp)->bd_dirtyq); in bufqueue()
1564 return (&bufdomain(bp)->bd_subq[bp->b_subqueue]); in bufqueue()
1568 panic("bufqueue(%p): Unhandled type %d\n", bp, bp->b_qindex); in bufqueue()
1575 bufqueue_acquire(struct buf *bp) in bufqueue_acquire() argument
1584 bq = bufqueue(bp); in bufqueue_acquire()
1587 nbq = bufqueue(bp); in bufqueue_acquire()
1604 binsfree(struct buf *bp, int qindex) in binsfree() argument
1611 BUF_ASSERT_XLOCKED(bp); in binsfree()
1616 if (bp->b_flags & B_REMFREE) { in binsfree()
1617 if (bp->b_qindex == qindex) { in binsfree()
1618 bp->b_flags |= B_REUSE; in binsfree()
1619 bp->b_flags &= ~B_REMFREE; in binsfree()
1620 BUF_UNLOCK(bp); in binsfree()
1623 bq = bufqueue_acquire(bp); in binsfree()
1624 bq_remove(bq, bp); in binsfree()
1627 bd = bufdomain(bp); in binsfree()
1635 bq_insert(bq, bp, true); in binsfree()
1644 buf_free(struct buf *bp) in buf_free() argument
1647 if (bp->b_flags & B_REMFREE) in buf_free()
1648 bremfreef(bp); in buf_free()
1649 if (bp->b_vflags & BV_BKGRDINPROG) in buf_free()
1651 if (bp->b_rcred != NOCRED) { in buf_free()
1652 crfree(bp->b_rcred); in buf_free()
1653 bp->b_rcred = NOCRED; in buf_free()
1655 if (bp->b_wcred != NOCRED) { in buf_free()
1656 crfree(bp->b_wcred); in buf_free()
1657 bp->b_wcred = NOCRED; in buf_free()
1659 if (!LIST_EMPTY(&bp->b_dep)) in buf_free()
1660 buf_deallocate(bp); in buf_free()
1661 bufkva_free(bp); in buf_free()
1662 atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1); in buf_free()
1663 MPASS((bp->b_flags & B_MAXPHYS) == 0); in buf_free()
1664 BUF_UNLOCK(bp); in buf_free()
1665 uma_zfree(buf_zone, bp); in buf_free()
1679 struct buf *bp; in buf_import() local
1684 bp = TAILQ_FIRST(&bqempty.bq_queue); in buf_import()
1685 if (bp == NULL) in buf_import()
1687 bq_remove(&bqempty, bp); in buf_import()
1688 store[i] = bp; in buf_import()
1704 struct buf *bp; in buf_release() local
1710 bp = store[i]; in buf_release()
1712 TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist); in buf_release()
1713 bp->b_flags &= ~(B_AGE | B_REUSE); in buf_release()
1715 bp->b_qindex = bq->bq_index; in buf_release()
1728 struct buf *bp; in buf_alloc() local
1736 bp = NULL; in buf_alloc()
1739 bp = uma_zalloc(buf_zone, M_NOWAIT); in buf_alloc()
1740 if (bp == NULL) { in buf_alloc()
1752 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWITNESS, NULL); in buf_alloc()
1753 KASSERT(error == 0, ("%s: BUF_LOCK on free buf %p: %d.", __func__, bp, in buf_alloc()
1757 KASSERT(bp->b_vp == NULL, in buf_alloc()
1758 ("bp: %p still has vnode %p.", bp, bp->b_vp)); in buf_alloc()
1759 KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0, in buf_alloc()
1760 ("invalid buffer %p flags %#x", bp, bp->b_flags)); in buf_alloc()
1761 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0, in buf_alloc()
1762 ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags)); in buf_alloc()
1763 KASSERT(bp->b_npages == 0, in buf_alloc()
1764 ("bp: %p still has %d vm pages\n", bp, bp->b_npages)); in buf_alloc()
1765 KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp)); in buf_alloc()
1766 KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp)); in buf_alloc()
1767 MPASS((bp->b_flags & B_MAXPHYS) == 0); in buf_alloc()
1769 bp->b_domain = BD_DOMAIN(bd); in buf_alloc()
1770 bp->b_flags = 0; in buf_alloc()
1771 bp->b_ioflags = 0; in buf_alloc()
1772 bp->b_xflags = 0; in buf_alloc()
1773 bp->b_vflags = 0; in buf_alloc()
1774 bp->b_vp = NULL; in buf_alloc()
1775 bp->b_blkno = bp->b_lblkno = 0; in buf_alloc()
1776 bp->b_offset = NOOFFSET; in buf_alloc()
1777 bp->b_iodone = 0; in buf_alloc()
1778 bp->b_error = 0; in buf_alloc()
1779 bp->b_resid = 0; in buf_alloc()
1780 bp->b_bcount = 0; in buf_alloc()
1781 bp->b_npages = 0; in buf_alloc()
1782 bp->b_dirtyoff = bp->b_dirtyend = 0; in buf_alloc()
1783 bp->b_bufobj = NULL; in buf_alloc()
1784 bp->b_data = bp->b_kvabase = unmapped_buf; in buf_alloc()
1785 bp->b_fsprivate1 = NULL; in buf_alloc()
1786 bp->b_fsprivate2 = NULL; in buf_alloc()
1787 bp->b_fsprivate3 = NULL; in buf_alloc()
1788 LIST_INIT(&bp->b_dep); in buf_alloc()
1790 return (bp); in buf_alloc()
1804 struct buf *bp, *nbp; in buf_recycle() local
1819 while ((bp = nbp) != NULL) { in buf_recycle()
1824 nbp = TAILQ_NEXT(bp, b_freelist); in buf_recycle()
1830 if (kva && bp->b_kvasize == 0) in buf_recycle()
1833 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) in buf_recycle()
1840 if ((bp->b_flags & B_REUSE) != 0) { in buf_recycle()
1841 TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist); in buf_recycle()
1842 TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist); in buf_recycle()
1843 bp->b_flags &= ~B_REUSE; in buf_recycle()
1844 BUF_UNLOCK(bp); in buf_recycle()
1851 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) { in buf_recycle()
1852 BUF_UNLOCK(bp); in buf_recycle()
1856 KASSERT(bp->b_qindex == QUEUE_CLEAN, in buf_recycle()
1858 bp->b_qindex, bp)); in buf_recycle()
1859 KASSERT(bp->b_domain == BD_DOMAIN(bd), in buf_recycle()
1861 bp->b_domain, (int)BD_DOMAIN(bd))); in buf_recycle()
1866 bq_remove(bq, bp); in buf_recycle()
1873 if ((bp->b_vflags & BV_BKGRDERR) != 0) { in buf_recycle()
1874 bqrelse(bp); in buf_recycle()
1879 bp->b_flags |= B_INVAL; in buf_recycle()
1880 brelse(bp); in buf_recycle()
1896 bremfree(struct buf *bp) in bremfree() argument
1899 CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); in bremfree()
1900 KASSERT((bp->b_flags & B_REMFREE) == 0, in bremfree()
1901 ("bremfree: buffer %p already marked for delayed removal.", bp)); in bremfree()
1902 KASSERT(bp->b_qindex != QUEUE_NONE, in bremfree()
1903 ("bremfree: buffer %p not on a queue.", bp)); in bremfree()
1904 BUF_ASSERT_XLOCKED(bp); in bremfree()
1906 bp->b_flags |= B_REMFREE; in bremfree()
1916 bremfreef(struct buf *bp) in bremfreef() argument
1920 bq = bufqueue_acquire(bp); in bremfreef()
1921 bq_remove(bq, bp); in bremfreef()
1960 bq_remove(struct bufqueue *bq, struct buf *bp) in bq_remove() argument
1964 bp, bp->b_vp, bp->b_flags); in bq_remove()
1965 KASSERT(bp->b_qindex != QUEUE_NONE, in bq_remove()
1966 ("bq_remove: buffer %p not on a queue.", bp)); in bq_remove()
1967 KASSERT(bufqueue(bp) == bq, in bq_remove()
1968 ("bq_remove: Remove buffer %p from wrong queue.", bp)); in bq_remove()
1971 if (bp->b_qindex != QUEUE_EMPTY) { in bq_remove()
1972 BUF_ASSERT_XLOCKED(bp); in bq_remove()
1975 ("queue %d underflow", bp->b_qindex)); in bq_remove()
1976 TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist); in bq_remove()
1978 bp->b_qindex = QUEUE_NONE; in bq_remove()
1979 bp->b_flags &= ~(B_REMFREE | B_REUSE); in bq_remove()
1985 struct buf *bp; in bd_flush() local
1990 while ((bp = TAILQ_FIRST(&bq->bq_queue)) != NULL) { in bd_flush()
1991 TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist); in bd_flush()
1992 TAILQ_INSERT_TAIL(&bd->bd_cleanq->bq_queue, bp, in bd_flush()
1994 bp->b_subqueue = bd->bd_cleanq->bq_subqueue; in bd_flush()
2031 bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock) in bq_insert() argument
2035 if (bp->b_qindex != QUEUE_NONE) in bq_insert()
2036 panic("bq_insert: free buffer %p onto another queue?", bp); in bq_insert()
2038 bd = bufdomain(bp); in bq_insert()
2039 if (bp->b_flags & B_AGE) { in bq_insert()
2044 TAILQ_INSERT_HEAD(&bq->bq_queue, bp, b_freelist); in bq_insert()
2047 TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist); in bq_insert()
2049 bp->b_flags &= ~(B_AGE | B_REUSE); in bq_insert()
2051 bp->b_qindex = bq->bq_index; in bq_insert()
2052 bp->b_subqueue = bq->bq_subqueue; in bq_insert()
2059 BUF_UNLOCK(bp); in bq_insert()
2061 if (bp->b_qindex == QUEUE_CLEAN) { in bq_insert()
2079 bufkva_free(struct buf *bp) in bufkva_free() argument
2083 if (bp->b_kvasize == 0) { in bufkva_free()
2084 KASSERT(bp->b_kvabase == unmapped_buf && in bufkva_free()
2085 bp->b_data == unmapped_buf, in bufkva_free()
2086 ("Leaked KVA space on %p", bp)); in bufkva_free()
2087 } else if (buf_mapped(bp)) in bufkva_free()
2088 BUF_CHECK_MAPPED(bp); in bufkva_free()
2090 BUF_CHECK_UNMAPPED(bp); in bufkva_free()
2092 if (bp->b_kvasize == 0) in bufkva_free()
2095 vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize); in bufkva_free()
2096 counter_u64_add(bufkvaspace, -bp->b_kvasize); in bufkva_free()
2098 bp->b_data = bp->b_kvabase = unmapped_buf; in bufkva_free()
2099 bp->b_kvasize = 0; in bufkva_free()
2108 bufkva_alloc(struct buf *bp, int maxsize, int gbflags) in bufkva_alloc() argument
2115 MPASS((bp->b_flags & B_MAXPHYS) == 0); in bufkva_alloc()
2119 bufkva_free(bp); in bufkva_alloc()
2130 bp->b_kvabase = (caddr_t)addr; in bufkva_alloc()
2131 bp->b_kvasize = maxsize; in bufkva_alloc()
2132 counter_u64_add(bufkvaspace, bp->b_kvasize); in bufkva_alloc()
2134 bp->b_data = unmapped_buf; in bufkva_alloc()
2135 BUF_CHECK_UNMAPPED(bp); in bufkva_alloc()
2137 bp->b_data = bp->b_kvabase; in bufkva_alloc()
2138 BUF_CHECK_MAPPED(bp); in bufkva_alloc()
2237 struct buf *bp; in breadn_flags() local
2247 error = getblkx(vp, blkno, dblkno, size, 0, 0, flags, &bp); in breadn_flags()
2252 KASSERT(blkno == bp->b_lblkno, in breadn_flags()
2254 (intmax_t)bp->b_lblkno, (intmax_t)blkno)); in breadn_flags()
2256 *bpp = bp; in breadn_flags()
2262 if ((bp->b_flags & B_CACHE) == 0) { in breadn_flags()
2266 racct_add_buf(td->td_proc, bp, 0); in breadn_flags()
2271 bp->b_iocmd = BIO_READ; in breadn_flags()
2272 bp->b_flags &= ~B_INVAL; in breadn_flags()
2274 bp->b_flags |= B_CKHASH; in breadn_flags()
2275 bp->b_ckhashcalc = ckhashfunc; in breadn_flags()
2278 bp->b_xflags |= BX_CVTENXIO; in breadn_flags()
2279 bp->b_ioflags &= ~BIO_ERROR; in breadn_flags()
2280 if (bp->b_rcred == NOCRED && cred != NOCRED) in breadn_flags()
2281 bp->b_rcred = crhold(cred); in breadn_flags()
2282 vfs_busy_pages(bp, 0); in breadn_flags()
2283 bp->b_iooffset = dbtob(bp->b_blkno); in breadn_flags()
2284 bstrategy(bp); in breadn_flags()
2295 rv = bufwait(bp); in breadn_flags()
2297 brelse(bp); in breadn_flags()
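breadn_flags() is the engine behind bread(): it looks the block up with getblkx(), issues BIO_READ only when B_CACHE is clear, and releases the buffer itself on a read error. A minimal consumer sketch (hypothetical filesystem code, not part of this file; example_read_block is an invented name):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/buf.h>
    #include <sys/ucred.h>
    #include <sys/vnode.h>

    /* Read one logical block through the buffer cache and release it. */
    static int
    example_read_block(struct vnode *vp, daddr_t lbn, int size)
    {
        struct buf *bp;
        int error;

        error = bread(vp, lbn, size, NOCRED, &bp);
        if (error != 0)
            return (error);     /* bread() released the buffer on error */
        /* bp->b_data holds bp->b_bcount valid bytes here. */
        brelse(bp);
        return (0);
    }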
2316 bufwrite(struct buf *bp) in bufwrite() argument
2323 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); in bufwrite()
2324 if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) { in bufwrite()
2325 bp->b_flags |= B_INVAL | B_RELBUF; in bufwrite()
2326 bp->b_flags &= ~B_CACHE; in bufwrite()
2327 brelse(bp); in bufwrite()
2330 if ((bp->b_flags & B_INVAL) != 0) { in bufwrite()
2331 brelse(bp); in bufwrite()
2335 if ((bp->b_flags & B_BARRIER) != 0) in bufwrite()
2338 oldflags = bp->b_flags; in bufwrite()
2340 KASSERT((bp->b_vflags & BV_BKGRDINPROG) == 0, in bufwrite()
2341 ("FFS background buffer should not get here %p", bp)); in bufwrite()
2343 vp = bp->b_vp; in bufwrite()
2352 bufobj_wref(bp->b_bufobj); in bufwrite()
2353 bundirty(bp); in bufwrite()
2355 bp->b_flags &= ~B_DONE; in bufwrite()
2356 bp->b_ioflags &= ~BIO_ERROR; in bufwrite()
2357 bp->b_flags |= B_CACHE; in bufwrite()
2358 bp->b_iocmd = BIO_WRITE; in bufwrite()
2360 vfs_busy_pages(bp, 1); in bufwrite()
2365 space = runningbufclaim(bp, bp->b_bufsize); in bufwrite()
2370 racct_add_buf(curproc, bp, 1); in bufwrite()
2376 BUF_KERNPROC(bp); in bufwrite()
2377 bp->b_iooffset = dbtob(bp->b_blkno); in bufwrite()
2378 buf_track(bp, __func__); in bufwrite()
2379 bstrategy(bp); in bufwrite()
2382 retval = bufwait(bp); in bufwrite()
2383 brelse(bp); in bufwrite()
2400 bufbdflush(struct bufobj *bo, struct buf *bp) in bufbdflush() argument
2407 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread); in bufbdflush()
2419 if (bp == nbp) in bufbdflush()
2452 bdwrite(struct buf *bp) in bdwrite() argument
2458 CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); in bdwrite()
2459 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); in bdwrite()
2460 KASSERT((bp->b_flags & B_BARRIER) == 0, in bdwrite()
2461 ("Barrier request in delayed write %p", bp)); in bdwrite()
2463 if (bp->b_flags & B_INVAL) { in bdwrite()
2464 brelse(bp); in bdwrite()
2475 vp = bp->b_vp; in bdwrite()
2476 bo = bp->b_bufobj; in bdwrite()
2479 BO_BDFLUSH(bo, bp); in bdwrite()
2484 bdirty(bp); in bdwrite()
2489 bp->b_flags |= B_CACHE; in bdwrite()
2500 if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) { in bdwrite()
2501 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); in bdwrite()
2504 buf_track(bp, __func__); in bdwrite()
2517 vfs_clean_pages_dirty_buf(bp); in bdwrite()
2518 bqrelse(bp); in bdwrite()
2545 bdirty(struct buf *bp) in bdirty() argument
2549 bp, bp->b_vp, bp->b_flags); in bdirty()
2550 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); in bdirty()
2551 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE, in bdirty()
2552 ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); in bdirty()
2553 bp->b_flags &= ~(B_RELBUF); in bdirty()
2554 bp->b_iocmd = BIO_WRITE; in bdirty()
2556 if ((bp->b_flags & B_DELWRI) == 0) { in bdirty()
2557 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI; in bdirty()
2558 reassignbuf(bp); in bdirty()
2559 bdirtyadd(bp); in bdirty()
2575 bundirty(struct buf *bp) in bundirty() argument
2578 CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); in bundirty()
2579 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); in bundirty()
2580 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE, in bundirty()
2581 ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex)); in bundirty()
2583 if (bp->b_flags & B_DELWRI) { in bundirty()
2584 bp->b_flags &= ~B_DELWRI; in bundirty()
2585 reassignbuf(bp); in bundirty()
2586 bdirtysub(bp); in bundirty()
2591 bp->b_flags &= ~B_DEFERRED; in bundirty()
2604 bawrite(struct buf *bp) in bawrite() argument
2607 bp->b_flags |= B_ASYNC; in bawrite()
2608 (void) bwrite(bp); in bawrite()
2621 babarrierwrite(struct buf *bp) in babarrierwrite() argument
2624 bp->b_flags |= B_ASYNC | B_BARRIER; in babarrierwrite()
2625 (void) bwrite(bp); in babarrierwrite()
2638 bbarrierwrite(struct buf *bp) in bbarrierwrite() argument
2641 bp->b_flags |= B_BARRIER; in bbarrierwrite()
2642 return (bwrite(bp)); in bbarrierwrite()
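bawrite(), babarrierwrite() and bbarrierwrite() are thin wrappers that set B_ASYNC and/or B_BARRIER before handing off to bwrite(); together with bdwrite() they form the three write disciplines. A sketch of choosing between them (hypothetical helper; assumes the caller owns the locked, valid buffer, whose ownership passes to the I/O system on every path):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/buf.h>

    static int
    example_write(struct buf *bp, bool delay, bool async)
    {
        if (delay) {
            bdwrite(bp);            /* mark B_DELWRI, requeue, no I/O yet */
            return (0);
        }
        if (async)
            bp->b_flags |= B_ASYNC; /* bwrite() won't sleep in bufwait() */
        return (bwrite(bp));        /* sync case waits, then releases bp */
    }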
2686 brelse(struct buf *bp) in brelse() argument
2695 if (bp == NULL) in brelse()
2698 bp, bp->b_vp, bp->b_flags); in brelse()
2699 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), in brelse()
2700 ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); in brelse()
2701 KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0, in brelse()
2704 if (BUF_LOCKRECURSED(bp)) { in brelse()
2709 BUF_UNLOCK(bp); in brelse()
2713 if (bp->b_flags & B_MANAGED) { in brelse()
2714 bqrelse(bp); in brelse()
2718 if (LIST_EMPTY(&bp->b_dep)) { in brelse()
2719 bp->b_flags &= ~B_IOSTARTED; in brelse()
2721 KASSERT((bp->b_flags & B_IOSTARTED) == 0, in brelse()
2722 ("brelse: SU io not finished bp %p", bp)); in brelse()
2725 if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) { in brelse()
2726 BO_LOCK(bp->b_bufobj); in brelse()
2727 bp->b_vflags &= ~BV_BKGRDERR; in brelse()
2728 BO_UNLOCK(bp->b_bufobj); in brelse()
2729 bdirty(bp); in brelse()
2732 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) && in brelse()
2733 (bp->b_flags & B_INVALONERR)) { in brelse()
2740 bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE; in brelse()
2741 bp->b_flags &= ~(B_ASYNC | B_CACHE); in brelse()
2744 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) && in brelse()
2745 (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) && in brelse()
2746 !(bp->b_flags & B_INVAL)) { in brelse()
2768 bp->b_ioflags &= ~BIO_ERROR; in brelse()
2769 bdirty(bp); in brelse()
2770 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) || in brelse()
2771 (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) { in brelse()
2777 bp->b_flags |= B_INVAL; in brelse()
2778 if (!LIST_EMPTY(&bp->b_dep)) in brelse()
2779 buf_deallocate(bp); in brelse()
2780 if (bp->b_flags & B_DELWRI) in brelse()
2781 bdirtysub(bp); in brelse()
2782 bp->b_flags &= ~(B_DELWRI | B_CACHE); in brelse()
2783 if ((bp->b_flags & B_VMIO) == 0) { in brelse()
2784 allocbuf(bp, 0); in brelse()
2785 if (bp->b_vp) in brelse()
2786 brelvp(bp); in brelse()
2799 if (bp->b_flags & B_DELWRI) in brelse()
2800 bp->b_flags &= ~B_RELBUF; in brelse()
2820 v_mnt = bp->b_vp != NULL ? bp->b_vp->v_mount : NULL; in brelse()
2822 if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE || in brelse()
2823 (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) && in brelse()
2825 vn_isdisk(bp->b_vp) || (bp->b_flags & B_DELWRI) == 0)) { in brelse()
2826 vfs_vmio_invalidate(bp); in brelse()
2827 allocbuf(bp, 0); in brelse()
2830 if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 || in brelse()
2831 (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) { in brelse()
2832 allocbuf(bp, 0); in brelse()
2833 bp->b_flags &= ~B_NOREUSE; in brelse()
2834 if (bp->b_vp != NULL) in brelse()
2835 brelvp(bp); in brelse()
2843 if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 || in brelse()
2844 (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0) in brelse()
2845 bp->b_flags |= B_INVAL; in brelse()
2846 if (bp->b_flags & B_INVAL) { in brelse()
2847 if (bp->b_flags & B_DELWRI) in brelse()
2848 bundirty(bp); in brelse()
2849 if (bp->b_vp) in brelse()
2850 brelvp(bp); in brelse()
2853 buf_track(bp, __func__); in brelse()
2856 if (bp->b_bufsize == 0) { in brelse()
2857 buf_free(bp); in brelse()
2861 if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) || in brelse()
2862 (bp->b_ioflags & BIO_ERROR)) { in brelse()
2863 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA); in brelse()
2864 if (bp->b_vflags & BV_BKGRDINPROG) in brelse()
2867 bp->b_flags |= B_AGE; in brelse()
2869 } else if (bp->b_flags & B_DELWRI) in brelse()
2874 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY)) in brelse()
2877 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT); in brelse()
2878 bp->b_xflags &= ~(BX_CVTENXIO); in brelse()
2880 binsfree(bp, qindex); in brelse()
2895 bqrelse(struct buf *bp) in bqrelse() argument
2899 CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); in bqrelse()
2900 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), in bqrelse()
2901 ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); in bqrelse()
2904 if (BUF_LOCKRECURSED(bp)) { in bqrelse()
2906 BUF_UNLOCK(bp); in bqrelse()
2909 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); in bqrelse()
2910 bp->b_xflags &= ~(BX_CVTENXIO); in bqrelse()
2912 if (LIST_EMPTY(&bp->b_dep)) { in bqrelse()
2913 bp->b_flags &= ~B_IOSTARTED; in bqrelse()
2915 KASSERT((bp->b_flags & B_IOSTARTED) == 0, in bqrelse()
2916 ("bqrelse: SU io not finished bp %p", bp)); in bqrelse()
2919 if (bp->b_flags & B_MANAGED) { in bqrelse()
2920 if (bp->b_flags & B_REMFREE) in bqrelse()
2921 bremfreef(bp); in bqrelse()
2926 if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG | in bqrelse()
2928 BO_LOCK(bp->b_bufobj); in bqrelse()
2929 bp->b_vflags &= ~BV_BKGRDERR; in bqrelse()
2930 BO_UNLOCK(bp->b_bufobj); in bqrelse()
2933 if ((bp->b_flags & B_DELWRI) == 0 && in bqrelse()
2934 (bp->b_xflags & BX_VNDIRTY)) in bqrelse()
2936 if ((bp->b_flags & B_NOREUSE) != 0) { in bqrelse()
2937 brelse(bp); in bqrelse()
2942 buf_track(bp, __func__); in bqrelse()
2944 binsfree(bp, qindex); in bqrelse()
2948 buf_track(bp, __func__); in bqrelse()
2950 BUF_UNLOCK(bp); in bqrelse()
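brelse() and bqrelse() are the two release disciplines: bqrelse() is the cheap path that requeues a still-useful buffer, while brelse() handles invalidation, error recovery and VMIO teardown (and bqrelse() itself falls back to brelse() for B_NOREUSE, per the lines above). A sketch of the choice (hypothetical helper using the B_INVAL | B_RELBUF idiom visible in bufwrite() above):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/buf.h>

    static void
    example_release(struct buf *bp, bool keep_cached)
    {
        if (keep_cached) {
            bqrelse(bp);            /* requeue, keep contents cached */
        } else {
            bp->b_flags |= B_INVAL | B_RELBUF;
            brelse(bp);             /* invalidate and recycle */
        }
    }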
2958 vfs_vmio_iodone(struct buf *bp) in vfs_vmio_iodone() argument
2967 obj = bp->b_bufobj->bo_object; in vfs_vmio_iodone()
2968 KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages, in vfs_vmio_iodone()
2970 blockcount_read(&obj->paging_in_progress), bp->b_npages)); in vfs_vmio_iodone()
2972 vp = bp->b_vp; in vfs_vmio_iodone()
2976 foff = bp->b_offset; in vfs_vmio_iodone()
2977 KASSERT(bp->b_offset != NOOFFSET, in vfs_vmio_iodone()
2978 ("vfs_vmio_iodone: bp %p has no buffer offset", bp)); in vfs_vmio_iodone()
2981 iosize = bp->b_bcount - bp->b_resid; in vfs_vmio_iodone()
2982 for (i = 0; i < bp->b_npages; i++) { in vfs_vmio_iodone()
2990 m = bp->b_pages[i]; in vfs_vmio_iodone()
2996 bp->b_pages[i] = m; in vfs_vmio_iodone()
2997 } else if ((bp->b_iocmd == BIO_READ) && resid > 0) { in vfs_vmio_iodone()
3006 vfs_page_set_valid(bp, foff, m); in vfs_vmio_iodone()
3016 vm_object_pip_wakeupn(obj, bp->b_npages); in vfs_vmio_iodone()
3017 if (bogus && buf_mapped(bp)) { in vfs_vmio_iodone()
3018 BUF_CHECK_MAPPED(bp); in vfs_vmio_iodone()
3019 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), in vfs_vmio_iodone()
3020 bp->b_pages, bp->b_npages); in vfs_vmio_iodone()
3029 vfs_vmio_invalidate(struct buf *bp) in vfs_vmio_invalidate() argument
3035 if (buf_mapped(bp)) { in vfs_vmio_invalidate()
3036 BUF_CHECK_MAPPED(bp); in vfs_vmio_invalidate()
3037 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages); in vfs_vmio_invalidate()
3039 BUF_CHECK_UNMAPPED(bp); in vfs_vmio_invalidate()
3052 flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0; in vfs_vmio_invalidate()
3053 obj = bp->b_bufobj->bo_object; in vfs_vmio_invalidate()
3054 resid = bp->b_bufsize; in vfs_vmio_invalidate()
3055 poffset = bp->b_offset & PAGE_MASK; in vfs_vmio_invalidate()
3057 for (i = 0; i < bp->b_npages; i++) { in vfs_vmio_invalidate()
3058 m = bp->b_pages[i]; in vfs_vmio_invalidate()
3061 bp->b_pages[i] = NULL; in vfs_vmio_invalidate()
3075 bp->b_npages = 0; in vfs_vmio_invalidate()
3082 vfs_vmio_truncate(struct buf *bp, int desiredpages) in vfs_vmio_truncate() argument
3088 if (bp->b_npages == desiredpages) in vfs_vmio_truncate()
3091 if (buf_mapped(bp)) { in vfs_vmio_truncate()
3092 BUF_CHECK_MAPPED(bp); in vfs_vmio_truncate()
3093 pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) + in vfs_vmio_truncate()
3094 (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages); in vfs_vmio_truncate()
3096 BUF_CHECK_UNMAPPED(bp); in vfs_vmio_truncate()
3101 flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0; in vfs_vmio_truncate()
3102 if ((bp->b_flags & B_DIRECT) != 0) { in vfs_vmio_truncate()
3104 obj = bp->b_bufobj->bo_object; in vfs_vmio_truncate()
3109 for (i = desiredpages; i < bp->b_npages; i++) { in vfs_vmio_truncate()
3110 m = bp->b_pages[i]; in vfs_vmio_truncate()
3112 bp->b_pages[i] = NULL; in vfs_vmio_truncate()
3120 bp->b_npages = desiredpages; in vfs_vmio_truncate()
3127 vfs_vmio_extend(struct buf *bp, int desiredpages, int size) in vfs_vmio_extend() argument
3143 obj = bp->b_bufobj->bo_object; in vfs_vmio_extend()
3144 if (bp->b_npages < desiredpages) { in vfs_vmio_extend()
3147 bp, desiredpages, maxbcachebuf)); in vfs_vmio_extend()
3160 OFF_TO_IDX(bp->b_offset) + bp->b_npages, in vfs_vmio_extend()
3163 &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages); in vfs_vmio_extend()
3164 bp->b_npages = desiredpages; in vfs_vmio_extend()
3181 toff = bp->b_bcount; in vfs_vmio_extend()
3182 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK); in vfs_vmio_extend()
3183 while ((bp->b_flags & B_CACHE) && toff < size) { in vfs_vmio_extend()
3188 pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT; in vfs_vmio_extend()
3189 m = bp->b_pages[pi]; in vfs_vmio_extend()
3190 vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m); in vfs_vmio_extend()
3198 if (buf_mapped(bp)) in vfs_vmio_extend()
3199 bpmap_qenter(bp); in vfs_vmio_extend()
3201 BUF_CHECK_UNMAPPED(bp); in vfs_vmio_extend()
3252 vfs_bio_awrite(struct buf *bp) in vfs_bio_awrite() argument
3257 daddr_t lblkno = bp->b_lblkno; in vfs_bio_awrite()
3258 struct vnode *vp = bp->b_vp; in vfs_bio_awrite()
3266 gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0; in vfs_bio_awrite()
3274 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { in vfs_bio_awrite()
3281 bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0) in vfs_bio_awrite()
3286 bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0) in vfs_bio_awrite()
3295 BUF_UNLOCK(bp); in vfs_bio_awrite()
3301 bremfree(bp); in vfs_bio_awrite()
3302 bp->b_flags |= B_ASYNC; in vfs_bio_awrite()
3308 nwritten = bp->b_bufsize; in vfs_bio_awrite()
3309 (void) bwrite(bp); in vfs_bio_awrite()
3320 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize) in getnewbuf_kva() argument
3330 if (maxsize != bp->b_kvasize && in getnewbuf_kva()
3331 bufkva_alloc(bp, maxsize, gbflags)) in getnewbuf_kva()
3356 struct buf *bp; in getnewbuf() local
3359 bp = NULL; in getnewbuf()
3384 if ((bp = buf_alloc(bd)) == NULL) { in getnewbuf()
3388 if (getnewbuf_kva(bp, gbflags, maxsize) == 0) in getnewbuf()
3389 return (bp); in getnewbuf()
3395 if (bp != NULL) { in getnewbuf()
3396 bp->b_flags |= B_INVAL; in getnewbuf()
3397 brelse(bp); in getnewbuf()
3578 struct buf *bp; in flushbufqueues() local
3586 bp = NULL; in flushbufqueues()
3595 bp = TAILQ_NEXT(sentinel, b_freelist); in flushbufqueues()
3596 if (bp != NULL) { in flushbufqueues()
3598 TAILQ_INSERT_AFTER(&bq->bq_queue, bp, sentinel, in flushbufqueues()
3611 if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL && in flushbufqueues()
3612 bp->b_vp != lvp)) { in flushbufqueues()
3616 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL); in flushbufqueues()
3625 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 || in flushbufqueues()
3626 (bp->b_flags & B_DELWRI) == 0) { in flushbufqueues()
3627 BUF_UNLOCK(bp); in flushbufqueues()
3630 if (bp->b_flags & B_INVAL) { in flushbufqueues()
3631 bremfreef(bp); in flushbufqueues()
3632 brelse(bp); in flushbufqueues()
3637 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) { in flushbufqueues()
3639 BUF_UNLOCK(bp); in flushbufqueues()
3655 vp = bp->b_vp; in flushbufqueues()
3657 BUF_UNLOCK(bp); in flushbufqueues()
3671 bp, bp->b_vp, bp->b_flags); in flushbufqueues()
3673 vfs_bio_awrite(bp); in flushbufqueues()
3675 bremfree(bp); in flushbufqueues()
3676 bwrite(bp); in flushbufqueues()
3695 BUF_UNLOCK(bp); in flushbufqueues()
3781 vfs_clean_pages_dirty_buf(struct buf *bp) in vfs_clean_pages_dirty_buf() argument
3787 if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0) in vfs_clean_pages_dirty_buf()
3790 foff = bp->b_offset; in vfs_clean_pages_dirty_buf()
3791 KASSERT(bp->b_offset != NOOFFSET, in vfs_clean_pages_dirty_buf()
3794 vfs_busy_pages_acquire(bp); in vfs_clean_pages_dirty_buf()
3795 vfs_setdirty_range(bp); in vfs_clean_pages_dirty_buf()
3796 for (i = 0; i < bp->b_npages; i++) { in vfs_clean_pages_dirty_buf()
3799 if (eoff > bp->b_offset + bp->b_bufsize) in vfs_clean_pages_dirty_buf()
3800 eoff = bp->b_offset + bp->b_bufsize; in vfs_clean_pages_dirty_buf()
3801 m = bp->b_pages[i]; in vfs_clean_pages_dirty_buf()
3802 vfs_page_set_validclean(bp, foff, m); in vfs_clean_pages_dirty_buf()
3806 vfs_busy_pages_release(bp); in vfs_clean_pages_dirty_buf()
3810 vfs_setdirty_range(struct buf *bp) in vfs_setdirty_range() argument
3820 for (i = 0; i < bp->b_npages; i++) in vfs_setdirty_range()
3821 vm_page_test_dirty(bp->b_pages[i]); in vfs_setdirty_range()
3828 for (i = 0; i < bp->b_npages; i++) { in vfs_setdirty_range()
3829 if (bp->b_pages[i]->dirty) in vfs_setdirty_range()
3832 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); in vfs_setdirty_range()
3834 for (i = bp->b_npages - 1; i >= 0; --i) { in vfs_setdirty_range()
3835 if (bp->b_pages[i]->dirty) { in vfs_setdirty_range()
3839 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); in vfs_setdirty_range()
3845 if (eoffset > bp->b_bcount) in vfs_setdirty_range()
3846 eoffset = bp->b_bcount; in vfs_setdirty_range()
3854 if (bp->b_dirtyoff > boffset) in vfs_setdirty_range()
3855 bp->b_dirtyoff = boffset; in vfs_setdirty_range()
3856 if (bp->b_dirtyend < eoffset) in vfs_setdirty_range()
3857 bp->b_dirtyend = eoffset; in vfs_setdirty_range()
3867 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags) in bp_unmapped_get_kva() argument
3872 need_mapping = bp->b_data == unmapped_buf && in bp_unmapped_get_kva()
3874 need_kva = bp->b_kvabase == unmapped_buf && in bp_unmapped_get_kva()
3875 bp->b_data == unmapped_buf && in bp_unmapped_get_kva()
3880 BUF_CHECK_UNMAPPED(bp); in bp_unmapped_get_kva()
3882 if (need_mapping && bp->b_kvabase != unmapped_buf) { in bp_unmapped_get_kva()
3895 bsize = vn_isdisk(bp->b_vp) ? DEV_BSIZE : bp->b_bufobj->bo_bsize; in bp_unmapped_get_kva()
3901 while (bufkva_alloc(bp, maxsize, gbflags) != 0) { in bp_unmapped_get_kva()
3907 panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp); in bp_unmapped_get_kva()
3910 bufspace_wait(bufdomain(bp), bp->b_vp, gbflags, 0, 0); in bp_unmapped_get_kva()
3915 bp->b_data = bp->b_kvabase; in bp_unmapped_get_kva()
3916 BUF_CHECK_MAPPED(bp); in bp_unmapped_get_kva()
3917 bpmap_qenter(bp); in bp_unmapped_get_kva()
3925 struct buf *bp; in getblk() local
3928 error = getblkx(vp, blkno, blkno, size, slpflag, slptimeo, flags, &bp); in getblk()
3931 return (bp); in getblk()
3981 struct buf *bp; in getblkx() local
4004 bp = gbincore_unlocked(bo, blkno); in getblkx()
4005 if (bp == NULL) { in getblkx()
4021 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL, "getblku", 0, in getblkx()
4042 if (bp->b_bufobj == bo && bp->b_lblkno == blkno) in getblkx()
4046 BUF_UNLOCK_RAW(bp); in getblkx()
4054 bp = gbincore(bo, blkno); in getblkx()
4055 if (bp != NULL) { in getblkx()
4068 error = BUF_TIMELOCK(bp, lockflags, in getblkx()
4083 if (BUF_LOCKRECURSED(bp)) in getblkx()
4092 if (bp->b_flags & B_INVAL) in getblkx()
4093 bp->b_flags &= ~B_CACHE; in getblkx()
4094 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0) in getblkx()
4095 bp->b_flags |= B_CACHE; in getblkx()
4096 if (bp->b_flags & B_MANAGED) in getblkx()
4097 MPASS(bp->b_qindex == QUEUE_NONE); in getblkx()
4099 bremfree(bp); in getblkx()
4104 if (bp->b_bcount != size) { in getblkx()
4105 if ((bp->b_flags & B_VMIO) == 0 || in getblkx()
4106 (size > bp->b_kvasize)) { in getblkx()
4107 if (bp->b_flags & B_DELWRI) { in getblkx()
4108 bp->b_flags |= B_NOCACHE; in getblkx()
4109 bwrite(bp); in getblkx()
4111 if (LIST_EMPTY(&bp->b_dep)) { in getblkx()
4112 bp->b_flags |= B_RELBUF; in getblkx()
4113 brelse(bp); in getblkx()
4115 bp->b_flags |= B_NOCACHE; in getblkx()
4116 bwrite(bp); in getblkx()
4128 bp_unmapped_get_kva(bp, blkno, size, flags); in getblkx()
4136 allocbuf(bp, size); in getblkx()
4138 KASSERT(bp->b_offset != NOOFFSET, in getblkx()
4168 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { in getblkx()
4169 bp->b_flags |= B_NOCACHE; in getblkx()
4170 bwrite(bp); in getblkx()
4173 bp->b_flags &= ~B_DONE; in getblkx()
4219 bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags); in getblkx()
4220 if (bp == NULL) { in getblkx()
4254 bp->b_lblkno = blkno; in getblkx()
4255 bp->b_blkno = d_blkno; in getblkx()
4256 bp->b_offset = offset; in getblkx()
4257 error = bgetvp(vp, bp); in getblkx()
4262 bp->b_flags |= B_INVAL; in getblkx()
4263 bufspace_release(bufdomain(bp), maxsize); in getblkx()
4264 brelse(bp); in getblkx()
4276 bp->b_flags |= B_VMIO; in getblkx()
4277 KASSERT(vp->v_object == bp->b_bufobj->bo_object, in getblkx()
4279 bp, vp->v_object, bp->b_bufobj->bo_object)); in getblkx()
4281 bp->b_flags &= ~B_VMIO; in getblkx()
4282 KASSERT(bp->b_bufobj->bo_object == NULL, in getblkx()
4284 bp, bp->b_bufobj->bo_object)); in getblkx()
4285 BUF_CHECK_MAPPED(bp); in getblkx()
4288 allocbuf(bp, size); in getblkx()
4289 bufspace_release(bufdomain(bp), maxsize); in getblkx()
4290 bp->b_flags &= ~B_DONE; in getblkx()
4292 CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp); in getblkx()
4294 buf_track(bp, __func__); in getblkx()
4295 KASSERT(bp->b_bufobj == bo, in getblkx()
4296 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in getblkx()
4297 *bpp = bp; in getblkx()
4308 struct buf *bp; in geteblk() local
4312 while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) { in geteblk()
4317 allocbuf(bp, size); in geteblk()
4318 bufspace_release(bufdomain(bp), maxsize); in geteblk()
4319 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ in geteblk()
4320 return (bp); in geteblk()
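getblk() is the non-read lookup: it returns a locked buffer and leaves B_CACHE to say whether the contents are valid, so a caller that will overwrite the whole block can skip the read entirely. A sketch (hypothetical; example_overwrite_block is an invented name, and the NULL check covers getblkx() error returns):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/errno.h>
    #include <sys/buf.h>
    #include <sys/vnode.h>

    static int
    example_overwrite_block(struct vnode *vp, daddr_t lbn, int size,
        const void *src)
    {
        struct buf *bp;

        bp = getblk(vp, lbn, size, 0, 0, 0);
        if (bp == NULL)
            return (EIO);
        if ((bp->b_flags & B_CACHE) == 0)
            vfs_bio_clrbuf(bp);     /* zero-fill rather than read */
        memcpy(bp->b_data, src, size);
        return (bwrite(bp));        /* writes synchronously, releases bp */
    }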
4327 vfs_nonvmio_truncate(struct buf *bp, int newbsize) in vfs_nonvmio_truncate() argument
4330 if (bp->b_flags & B_MALLOC) { in vfs_nonvmio_truncate()
4335 bufmallocadjust(bp, 0); in vfs_nonvmio_truncate()
4336 free(bp->b_data, M_BIOBUF); in vfs_nonvmio_truncate()
4337 bp->b_data = bp->b_kvabase; in vfs_nonvmio_truncate()
4338 bp->b_flags &= ~B_MALLOC; in vfs_nonvmio_truncate()
4342 vm_hold_free_pages(bp, newbsize); in vfs_nonvmio_truncate()
4343 bufspace_adjust(bp, newbsize); in vfs_nonvmio_truncate()
4350 vfs_nonvmio_extend(struct buf *bp, int newbsize) in vfs_nonvmio_extend() argument
4365 if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 && in vfs_nonvmio_extend()
4367 bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK); in vfs_nonvmio_extend()
4368 bp->b_flags |= B_MALLOC; in vfs_nonvmio_extend()
4369 bufmallocadjust(bp, newbsize); in vfs_nonvmio_extend()
4380 if (bp->b_flags & B_MALLOC) { in vfs_nonvmio_extend()
4381 origbuf = bp->b_data; in vfs_nonvmio_extend()
4382 origbufsize = bp->b_bufsize; in vfs_nonvmio_extend()
4383 bp->b_data = bp->b_kvabase; in vfs_nonvmio_extend()
4384 bufmallocadjust(bp, 0); in vfs_nonvmio_extend()
4385 bp->b_flags &= ~B_MALLOC; in vfs_nonvmio_extend()
4388 vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize, in vfs_nonvmio_extend()
4389 (vm_offset_t) bp->b_data + newbsize); in vfs_nonvmio_extend()
4391 bcopy(origbuf, bp->b_data, origbufsize); in vfs_nonvmio_extend()
4394 bufspace_adjust(bp, newbsize); in vfs_nonvmio_extend()
4412 allocbuf(struct buf *bp, int size) in allocbuf() argument
4416 if (bp->b_bcount == size) in allocbuf()
4419 KASSERT(bp->b_kvasize == 0 || bp->b_kvasize >= size, in allocbuf()
4421 bp, bp->b_kvasize, size)); in allocbuf()
4424 if ((bp->b_flags & B_VMIO) == 0) { in allocbuf()
4425 if ((bp->b_flags & B_MALLOC) == 0) in allocbuf()
4431 if (newbsize < bp->b_bufsize) in allocbuf()
4432 vfs_nonvmio_truncate(bp, newbsize); in allocbuf()
4433 else if (newbsize > bp->b_bufsize) in allocbuf()
4434 vfs_nonvmio_extend(bp, newbsize); in allocbuf()
4439 num_pages((bp->b_offset & PAGE_MASK) + newbsize); in allocbuf()
4441 KASSERT((bp->b_flags & B_MALLOC) == 0, in allocbuf()
4442 ("allocbuf: VMIO buffer can't be malloced %p", bp)); in allocbuf()
4448 if (size == 0 || bp->b_bufsize == 0) in allocbuf()
4449 bp->b_flags |= B_CACHE; in allocbuf()
4451 if (newbsize < bp->b_bufsize) in allocbuf()
4452 vfs_vmio_truncate(bp, desiredpages); in allocbuf()
4454 else if (size > bp->b_bcount) in allocbuf()
4455 vfs_vmio_extend(bp, desiredpages, size); in allocbuf()
4456 bufspace_adjust(bp, newbsize); in allocbuf()
4458 bp->b_bcount = size; /* requested buffer size. */ in allocbuf()
4467 biodone(struct bio *bp) in biodone() argument
4473 biotrack(bp, __func__); in biodone()
4482 TAILQ_INSERT_HEAD(&nondump_bios, bp, bio_queue); in biodone()
4485 if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) { in biodone()
4486 bp->bio_flags &= ~BIO_TRANSIENT_MAPPING; in biodone()
4487 bp->bio_flags |= BIO_UNMAPPED; in biodone()
4488 start = trunc_page((vm_offset_t)bp->bio_data); in biodone()
4489 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length); in biodone()
4490 bp->bio_data = unmapped_buf; in biodone()
4495 done = bp->bio_done; in biodone()
4501 mtxp = mtx_pool_find(mtxpool_sleep, bp); in biodone()
4503 bp->bio_flags |= BIO_DONE; in biodone()
4504 wakeup(bp); in biodone()
4507 done(bp); in biodone()
4514 biowait(struct bio *bp, const char *wmesg) in biowait() argument
4518 mtxp = mtx_pool_find(mtxpool_sleep, bp); in biowait()
4520 while ((bp->bio_flags & BIO_DONE) == 0) in biowait()
4521 msleep(bp, mtxp, PRIBIO, wmesg, 0); in biowait()
4523 if (bp->bio_error != 0) in biowait()
4524 return (bp->bio_error); in biowait()
4525 if (!(bp->bio_flags & BIO_ERROR)) in biowait()
4531 biofinish(struct bio *bp, struct devstat *stat, int error) in biofinish() argument
4535 bp->bio_error = error; in biofinish()
4536 bp->bio_flags |= BIO_ERROR; in biofinish()
4539 devstat_end_transaction_bio(stat, bp); in biofinish()
4540 biodone(bp); in biofinish()
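biodone() runs the bio's completion callback when one is set; with bio_done == NULL it instead sets BIO_DONE under a pool mutex and wakes sleepers, which is exactly the handshake biowait() sleeps on. A synchronous-request sketch (hypothetical GEOM consumer code; assumes bip already carries bio_cmd, bio_offset, bio_length and bio_data):

    #include <sys/param.h>
    #include <sys/bio.h>
    #include <geom/geom.h>

    static int
    example_bio_sync(struct g_consumer *cp, struct bio *bip)
    {
        bip->bio_done = NULL;   /* NULL callback: biodone() wakes biowait() */
        g_io_request(bip, cp);
        return (biowait(bip, "exbio"));
    }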
4545 biotrack_buf(struct bio *bp, const char *location) in biotrack_buf() argument
4548 buf_track(bp->bio_track_bp, location); in biotrack_buf()
4560 bufwait(struct buf *bp) in bufwait() argument
4562 if (bp->b_iocmd == BIO_READ) in bufwait()
4563 bwait(bp, PRIBIO, "biord"); in bufwait()
4565 bwait(bp, PRIBIO, "biowr"); in bufwait()
4566 if (bp->b_flags & B_EINTR) { in bufwait()
4567 bp->b_flags &= ~B_EINTR; in bufwait()
4570 if (bp->b_ioflags & BIO_ERROR) { in bufwait()
4571 return (bp->b_error ? bp->b_error : EIO); in bufwait()
4597 bufdone(struct buf *bp) in bufdone() argument
4602 buf_track(bp, __func__); in bufdone()
4603 CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); in bufdone()
4606 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp)); in bufdone()
4608 runningbufwakeup(bp); in bufdone()
4609 if (bp->b_iocmd == BIO_WRITE) in bufdone()
4610 dropobj = bp->b_bufobj; in bufdone()
4612 if (bp->b_iodone != NULL) { in bufdone()
4613 biodone = bp->b_iodone; in bufdone()
4614 bp->b_iodone = NULL; in bufdone()
4615 (*biodone) (bp); in bufdone()
4620 if (bp->b_flags & B_VMIO) { in bufdone()
4626 if (bp->b_iocmd == BIO_READ && in bufdone()
4627 !(bp->b_flags & (B_INVAL|B_NOCACHE)) && in bufdone()
4628 !(bp->b_ioflags & BIO_ERROR)) in bufdone()
4629 bp->b_flags |= B_CACHE; in bufdone()
4630 vfs_vmio_iodone(bp); in bufdone()
4632 if (!LIST_EMPTY(&bp->b_dep)) in bufdone()
4633 buf_complete(bp); in bufdone()
4634 if ((bp->b_flags & B_CKHASH) != 0) { in bufdone()
4635 KASSERT(bp->b_iocmd == BIO_READ, in bufdone()
4636 ("bufdone: b_iocmd %d not BIO_READ", bp->b_iocmd)); in bufdone()
4637 KASSERT(buf_mapped(bp), ("bufdone: bp %p not mapped", bp)); in bufdone()
4638 (*bp->b_ckhashcalc)(bp); in bufdone()
4645 if (bp->b_flags & B_ASYNC) { in bufdone()
4646 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || in bufdone()
4647 (bp->b_ioflags & BIO_ERROR)) in bufdone()
4648 brelse(bp); in bufdone()
4650 bqrelse(bp); in bufdone()
4652 bdone(bp); in bufdone()
4663 vfs_unbusy_pages(struct buf *bp) in vfs_unbusy_pages() argument
4669 runningbufwakeup(bp); in vfs_unbusy_pages()
4670 if (!(bp->b_flags & B_VMIO)) in vfs_unbusy_pages()
4673 obj = bp->b_bufobj->bo_object; in vfs_unbusy_pages()
4674 for (i = 0; i < bp->b_npages; i++) { in vfs_unbusy_pages()
4675 m = bp->b_pages[i]; in vfs_unbusy_pages()
4677 m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i); in vfs_unbusy_pages()
4680 bp->b_pages[i] = m; in vfs_unbusy_pages()
4681 if (buf_mapped(bp)) { in vfs_unbusy_pages()
4682 BUF_CHECK_MAPPED(bp); in vfs_unbusy_pages()
4683 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), in vfs_unbusy_pages()
4684 bp->b_pages, bp->b_npages); in vfs_unbusy_pages()
4686 BUF_CHECK_UNMAPPED(bp); in vfs_unbusy_pages()
4690 vm_object_pip_wakeupn(obj, bp->b_npages); in vfs_unbusy_pages()
4702 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m) in vfs_page_set_valid() argument
4713 if (eoff > bp->b_offset + bp->b_bcount) in vfs_page_set_valid()
4714 eoff = bp->b_offset + bp->b_bcount; in vfs_page_set_valid()
4731 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m) in vfs_page_set_validclean() argument
4743 if (eoff > bp->b_offset + bp->b_bcount) in vfs_page_set_validclean()
4744 eoff = bp->b_offset + bp->b_bcount; in vfs_page_set_validclean()
4763 vfs_busy_pages_acquire(struct buf *bp) in vfs_busy_pages_acquire() argument
4767 for (i = 0; i < bp->b_npages; i++) in vfs_busy_pages_acquire()
4768 vm_page_busy_acquire(bp->b_pages[i], VM_ALLOC_SBUSY); in vfs_busy_pages_acquire()
4772 vfs_busy_pages_release(struct buf *bp) in vfs_busy_pages_release() argument
4776 for (i = 0; i < bp->b_npages; i++) in vfs_busy_pages_release()
4777 vm_page_sunbusy(bp->b_pages[i]); in vfs_busy_pages_release()
4793 vfs_busy_pages(struct buf *bp, int clear_modify) in vfs_busy_pages() argument
4801 if (!(bp->b_flags & B_VMIO)) in vfs_busy_pages()
4804 obj = bp->b_bufobj->bo_object; in vfs_busy_pages()
4805 foff = bp->b_offset; in vfs_busy_pages()
4806 KASSERT(bp->b_offset != NOOFFSET, in vfs_busy_pages()
4808 if ((bp->b_flags & B_CLUSTER) == 0) { in vfs_busy_pages()
4809 vm_object_pip_add(obj, bp->b_npages); in vfs_busy_pages()
4810 vfs_busy_pages_acquire(bp); in vfs_busy_pages()
4812 if (bp->b_bufsize != 0) in vfs_busy_pages()
4813 vfs_setdirty_range(bp); in vfs_busy_pages()
4815 for (i = 0; i < bp->b_npages; i++) { in vfs_busy_pages()
4816 m = bp->b_pages[i]; in vfs_busy_pages()
4836 vfs_page_set_validclean(bp, foff, m); in vfs_busy_pages()
4838 (bp->b_flags & B_CACHE) == 0) { in vfs_busy_pages()
4839 bp->b_pages[i] = bogus_page; in vfs_busy_pages()
4844 if (bogus && buf_mapped(bp)) { in vfs_busy_pages()
4845 BUF_CHECK_MAPPED(bp); in vfs_busy_pages()
4846 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), in vfs_busy_pages()
4847 bp->b_pages, bp->b_npages); in vfs_busy_pages()
4860 vfs_bio_set_valid(struct buf *bp, int base, int size) in vfs_bio_set_valid() argument
4865 if (!(bp->b_flags & B_VMIO)) in vfs_bio_set_valid()
4873 base += (bp->b_offset & PAGE_MASK); in vfs_bio_set_valid()
4882 vfs_busy_pages_acquire(bp); in vfs_bio_set_valid()
4883 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { in vfs_bio_set_valid()
4884 m = bp->b_pages[i]; in vfs_bio_set_valid()
4892 vfs_busy_pages_release(bp); in vfs_bio_set_valid()
4908 vfs_bio_clrbuf(struct buf *bp) in vfs_bio_clrbuf() argument
4913 if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) { in vfs_bio_clrbuf()
4914 clrbuf(bp); in vfs_bio_clrbuf()
4917 bp->b_flags &= ~B_INVAL; in vfs_bio_clrbuf()
4918 bp->b_ioflags &= ~BIO_ERROR; in vfs_bio_clrbuf()
4919 vfs_busy_pages_acquire(bp); in vfs_bio_clrbuf()
4920 sa = bp->b_offset & PAGE_MASK; in vfs_bio_clrbuf()
4922 for (i = 0; i < bp->b_npages; i++, sa = 0) { in vfs_bio_clrbuf()
4923 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize); in vfs_bio_clrbuf()
4927 if (bp->b_pages[i] == bogus_page) in vfs_bio_clrbuf()
4933 if ((bp->b_pages[i]->valid & mask) == mask) in vfs_bio_clrbuf()
4935 if ((bp->b_pages[i]->valid & mask) == 0) in vfs_bio_clrbuf()
4936 pmap_zero_page_area(bp->b_pages[i], sa, ea - sa); in vfs_bio_clrbuf()
4939 if ((bp->b_pages[i]->valid & (1 << j)) == 0) { in vfs_bio_clrbuf()
4940 pmap_zero_page_area(bp->b_pages[i], in vfs_bio_clrbuf()
4945 vm_page_set_valid_range(bp->b_pages[i], j * DEV_BSIZE, in vfs_bio_clrbuf()
4948 vfs_busy_pages_release(bp); in vfs_bio_clrbuf()
4949 bp->b_resid = 0; in vfs_bio_clrbuf()
4953 vfs_bio_bzero_buf(struct buf *bp, int base, int size) in vfs_bio_bzero_buf() argument
4958 if (buf_mapped(bp)) { in vfs_bio_bzero_buf()
4959 BUF_CHECK_MAPPED(bp); in vfs_bio_bzero_buf()
4960 bzero(bp->b_data + base, size); in vfs_bio_bzero_buf()
4962 BUF_CHECK_UNMAPPED(bp); in vfs_bio_bzero_buf()
4964 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { in vfs_bio_bzero_buf()
4965 m = bp->b_pages[i]; in vfs_bio_bzero_buf()
4983 b_io_dismiss(struct buf *bp, int ioflag, bool release) in b_io_dismiss() argument
4987 ("buf %p non-VMIO noreuse", bp)); in b_io_dismiss()
4990 bp->b_flags |= B_DIRECT; in b_io_dismiss()
4992 bp->b_xflags |= BX_ALTDATA; in b_io_dismiss()
4993 if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) { in b_io_dismiss()
4994 bp->b_flags |= B_RELBUF; in b_io_dismiss()
4996 bp->b_flags |= B_NOREUSE; in b_io_dismiss()
4998 brelse(bp); in b_io_dismiss()
5000 bqrelse(bp); in b_io_dismiss()
5004 vfs_bio_brelse(struct buf *bp, int ioflag) in vfs_bio_brelse() argument
5007 b_io_dismiss(bp, ioflag, true); in vfs_bio_brelse()
5011 vfs_bio_set_flags(struct buf *bp, int ioflag) in vfs_bio_set_flags() argument
5014 b_io_dismiss(bp, ioflag, false); in vfs_bio_set_flags()
5023 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) in vm_hold_load_pages() argument
5029 BUF_CHECK_MAPPED(bp); in vm_hold_load_pages()
5033 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; in vm_hold_load_pages()
5034 MPASS((bp->b_flags & B_MAXPHYS) == 0); in vm_hold_load_pages()
5037 bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf)); in vm_hold_load_pages()
5048 bp->b_pages[index] = p; in vm_hold_load_pages()
5050 bp->b_npages = index; in vm_hold_load_pages()
5055 vm_hold_free_pages(struct buf *bp, int newbsize) in vm_hold_free_pages() argument
5061 BUF_CHECK_MAPPED(bp); in vm_hold_free_pages()
5063 from = round_page((vm_offset_t)bp->b_data + newbsize); in vm_hold_free_pages()
5064 newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; in vm_hold_free_pages()
5065 if (bp->b_npages > newnpages) in vm_hold_free_pages()
5066 pmap_qremove(from, bp->b_npages - newnpages); in vm_hold_free_pages()
5067 for (index = newnpages; index < bp->b_npages; index++) { in vm_hold_free_pages()
5068 p = bp->b_pages[index]; in vm_hold_free_pages()
5069 bp->b_pages[index] = NULL; in vm_hold_free_pages()
5073 bp->b_npages = newnpages; in vm_hold_free_pages()
5091 vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf) in vmapbuf() argument
5096 MPASS((bp->b_flags & B_MAXPHYS) != 0); in vmapbuf()
5098 if (bp->b_iocmd == BIO_READ) in vmapbuf()
5101 (vm_offset_t)uaddr, len, prot, bp->b_pages, PBUF_PAGES); in vmapbuf()
5104 bp->b_bufsize = len; in vmapbuf()
5105 bp->b_npages = pidx; in vmapbuf()
5106 bp->b_offset = ((vm_offset_t)uaddr) & PAGE_MASK; in vmapbuf()
5108 pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx); in vmapbuf()
5109 bp->b_data = bp->b_kvabase + bp->b_offset; in vmapbuf()
5111 bp->b_data = unmapped_buf; in vmapbuf()
5122 vunmapbuf(struct buf *bp) in vunmapbuf() argument
5126 npages = bp->b_npages; in vunmapbuf()
5127 if (buf_mapped(bp)) in vunmapbuf()
5128 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); in vunmapbuf()
5129 vm_page_unhold_pages(bp->b_pages, npages); in vunmapbuf()
5131 bp->b_data = unmapped_buf; in vunmapbuf()
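vmapbuf() wires a user address range into a B_MAXPHYS pbuf (holding the pages and, when mapbuf is nonzero, entering them into the buffer's KVA), and vunmapbuf() undoes both. A pairing sketch (hypothetical driver code; the "< 0 on fault" convention is an assumption drawn from typical physio-style callers):

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/buf.h>

    static int
    example_wire_user(struct buf *bp, void *uaddr, size_t len)
    {
        /* bp must come from the pbuf zone, i.e. have B_MAXPHYS set. */
        if (vmapbuf(bp, uaddr, len, 1) < 0)
            return (EFAULT);
        /* ... issue I/O against bp->b_data or bp->b_pages ... */
        vunmapbuf(bp);
        return (0);
    }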
5135 bdone(struct buf *bp) in bdone() argument
5139 mtxp = mtx_pool_find(mtxpool_sleep, bp); in bdone()
5141 bp->b_flags |= B_DONE; in bdone()
5142 wakeup(bp); in bdone()
5147 bwait(struct buf *bp, u_char pri, const char *wchan) in bwait() argument
5151 mtxp = mtx_pool_find(mtxpool_sleep, bp); in bwait()
5153 while ((bp->b_flags & B_DONE) == 0) in bwait()
5154 msleep(bp, mtxp, pri, wchan, 0); in bwait()
5166 bufstrategy(struct bufobj *bo, struct buf *bp) in bufstrategy() argument
5171 vp = bp->b_vp; in bufstrategy()
5174 ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp)); in bufstrategy()
5175 i = VOP_STRATEGY(vp, bp); in bufstrategy()
5176 KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp)); in bufstrategy()
5252 bdata2bio(struct buf *bp, struct bio *bip) in bdata2bio() argument
5255 if (!buf_mapped(bp)) { in bdata2bio()
5257 bip->bio_ma = bp->b_pages; in bdata2bio()
5258 bip->bio_ma_n = bp->b_npages; in bdata2bio()
5260 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK; in bdata2bio()
5263 PAGE_SIZE == bp->b_npages, in bdata2bio()
5264 ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset, in bdata2bio()
5267 bip->bio_data = bp->b_data; in bdata2bio()
5316 struct buf *bp; in vfs_bio_getpages() local
5400 curthread->td_ucred, br_flags, &bp); in vfs_bio_getpages()
5403 if (bp->b_rcred == curthread->td_ucred) { in vfs_bio_getpages()
5404 crfree(bp->b_rcred); in vfs_bio_getpages()
5405 bp->b_rcred = NOCRED; in vfs_bio_getpages()
5407 if (LIST_EMPTY(&bp->b_dep)) { in vfs_bio_getpages()
5422 bp->b_flags |= B_RELBUF; in vfs_bio_getpages()
5424 bp->b_flags &= ~B_NOCACHE; in vfs_bio_getpages()
5425 brelse(bp); in vfs_bio_getpages()
5427 bqrelse(bp); in vfs_bio_getpages()
5481 struct buf *bp = (struct buf *)addr; in DB_SHOW_COMMAND() local
5491 db_printf("buf at %p\n", bp); in DB_SHOW_COMMAND()
5493 (u_int)bp->b_flags, PRINT_BUF_FLAGS, in DB_SHOW_COMMAND()
5494 (u_int)bp->b_xflags, PRINT_BUF_XFLAGS); in DB_SHOW_COMMAND()
5496 (u_int)bp->b_vflags, PRINT_BUF_VFLAGS, in DB_SHOW_COMMAND()
5497 (u_int)bp->b_ioflags, PRINT_BIO_FLAGS); in DB_SHOW_COMMAND()
5502 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, in DB_SHOW_COMMAND()
5503 bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno, in DB_SHOW_COMMAND()
5504 (intmax_t)bp->b_lblkno, bp->b_vp, bp->b_dep.lh_first); in DB_SHOW_COMMAND()
5506 bp->b_kvabase, bp->b_kvasize); in DB_SHOW_COMMAND()
5507 if (bp->b_npages) { in DB_SHOW_COMMAND()
5509 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); in DB_SHOW_COMMAND()
5510 for (i = 0; i < bp->b_npages; i++) { in DB_SHOW_COMMAND()
5512 m = bp->b_pages[i]; in DB_SHOW_COMMAND()
5519 if ((i + 1) < bp->b_npages) in DB_SHOW_COMMAND()
5524 BUF_LOCKPRINTINFO(bp); in DB_SHOW_COMMAND()
5526 db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt); in DB_SHOW_COMMAND()
5528 i = bp->b_io_tcnt % BUF_TRACKING_SIZE; in DB_SHOW_COMMAND()
5530 if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL) in DB_SHOW_COMMAND()
5533 bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]); in DB_SHOW_COMMAND()
5536 db_printf("b_io_tracking: %s\n", bp->b_io_tracking); in DB_SHOW_COMMAND()
5543 struct buf *bp; in DB_SHOW_COMMAND_FLAGS() local
5568 TAILQ_FOREACH(bp, &bd->bd_cleanq->bq_queue, b_freelist) in DB_SHOW_COMMAND_FLAGS()
5569 total += bp->b_bufsize; in DB_SHOW_COMMAND_FLAGS()
5573 TAILQ_FOREACH(bp, &bd->bd_dirtyq.bq_queue, b_freelist) in DB_SHOW_COMMAND_FLAGS()
5574 total += bp->b_bufsize; in DB_SHOW_COMMAND_FLAGS()
5586 bp = nbufp(j); in DB_SHOW_COMMAND_FLAGS()
5587 if (bp->b_domain == i && BUF_ISLOCKED(bp)) { in DB_SHOW_COMMAND_FLAGS()
5589 total += bp->b_bufsize; in DB_SHOW_COMMAND_FLAGS()
5596 bp = nbufp(j); in DB_SHOW_COMMAND_FLAGS()
5597 if (bp->b_domain == i) { in DB_SHOW_COMMAND_FLAGS()
5599 total += bp->b_bufsize; in DB_SHOW_COMMAND_FLAGS()
5608 struct buf *bp; in DB_SHOW_COMMAND_FLAGS() local
5612 bp = nbufp(i); in DB_SHOW_COMMAND_FLAGS()
5613 if (BUF_ISLOCKED(bp)) { in DB_SHOW_COMMAND_FLAGS()
5614 db_show_buffer((uintptr_t)bp, 1, 0, NULL); in DB_SHOW_COMMAND_FLAGS()
5625 struct buf *bp; in DB_SHOW_COMMAND() local
5633 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) { in DB_SHOW_COMMAND()
5634 db_show_buffer((uintptr_t)bp, 1, 0, NULL); in DB_SHOW_COMMAND()
5638 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) { in DB_SHOW_COMMAND()
5639 db_show_buffer((uintptr_t)bp, 1, 0, NULL); in DB_SHOW_COMMAND()
5646 struct buf *bp; in DB_COMMAND_FLAGS() local
5655 bp = nbufp(i); in DB_COMMAND_FLAGS()
5656 if (bp->b_qindex == QUEUE_EMPTY) in DB_COMMAND_FLAGS()