Lines matching refs:bp — references to the struct buf pointer bp across the clustered I/O path: cluster_read(), cluster_rbuild(), cluster_callback(), cluster_write(), cluster_wbuild(), and cluster_collectbufs().
93 struct buf *bp, *rbp, *reqbp; in cluster_read() local
121 error = getblkx(vp, lblkno, lblkno, size, 0, 0, gbflags, &bp); in cluster_read()
128 *bpp = reqbp = bp; in cluster_read()
135 if (bp->b_flags & B_CACHE) { in cluster_read()
138 } else if ((bp->b_flags & B_RAM) == 0) { in cluster_read()
141 bp->b_flags &= ~B_RAM; in cluster_read()
171 reqbp = bp = NULL; in cluster_read()
177 off_t firstread = bp->b_offset; in cluster_read()
181 KASSERT(bp->b_offset != NOOFFSET, in cluster_read()
225 bp = cluster_rbuild(vp, filesize, lblkno, in cluster_read()
226 blkno, size, nblks, gbflags, bp); in cluster_read()
227 lblkno += (bp->b_bufsize / size); in cluster_read()
229 bp->b_flags |= B_RAM; in cluster_read()
230 bp->b_iocmd = BIO_READ; in cluster_read()
238 if (bp) { in cluster_read()
239 if ((bp->b_flags & B_CLUSTER) == 0) { in cluster_read()
240 vfs_busy_pages(bp, 0); in cluster_read()
242 bp->b_flags &= ~B_INVAL; in cluster_read()
243 bp->b_ioflags &= ~BIO_ERROR; in cluster_read()
244 if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL) in cluster_read()
245 BUF_KERNPROC(bp); in cluster_read()
246 bp->b_iooffset = dbtob(bp->b_blkno); in cluster_read()
247 bstrategy(bp); in cluster_read()
251 racct_add_buf(td->td_proc, bp, 0); in cluster_read()
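The cluster_read() matches above show the read-ahead trigger pattern: getblkx() returns the requested buffer; a B_CACHE hit needs no I/O unless the buffer carries the B_RAM read-ahead mark, which is cleared (line 141) and used as the cue to extend the read-ahead window; on a miss, cluster_rbuild() assembles a larger cluster and bstrategy() issues it, with B_RAM set on the last buffer of the run (line 229) so read-ahead is re-armed when the application reaches it. Below is a minimal userspace sketch of the trigger logic only; the struct layout and flag values are invented for the example and are not the kernel's definitions.

/* Illustrative only: flag values and struct layout are not the kernel's. */
#include <stdio.h>

#define B_CACHE 0x01    /* contents valid (made-up value) */
#define B_RAM   0x02    /* read-ahead mark (made-up value) */

struct buf_sketch {
        unsigned b_flags;
};

/* Return 1 when touching this buffer should start (more) read-ahead. */
static int
wants_readahead(struct buf_sketch *bp)
{
        if ((bp->b_flags & B_CACHE) == 0)
                return (1);             /* miss: read, and read ahead */
        if ((bp->b_flags & B_RAM) == 0)
                return (0);             /* plain hit: nothing to do */
        bp->b_flags &= ~B_RAM;          /* consume the mark, cf. line 141 */
        return (1);                     /* marked hit: extend the window */
}

int
main(void)
{
        struct buf_sketch b = { B_CACHE | B_RAM };

        printf("%d\n", wants_readahead(&b));    /* 1: marked hit, re-arm read-ahead */
        printf("%d\n", wants_readahead(&b));    /* 0: plain cache hit */
        return (0);
}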
341 struct buf *bp, *tbp; in cluster_rbuild() local
373 bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT); in cluster_rbuild()
374 if (bp == NULL) in cluster_rbuild()
376 MPASS((bp->b_flags & B_MAXPHYS) != 0); in cluster_rbuild()
384 bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO; in cluster_rbuild()
386 bp->b_data = unmapped_buf; in cluster_rbuild()
388 bp->b_data = (char *)((vm_offset_t)bp->b_data | in cluster_rbuild()
391 bp->b_iocmd = BIO_READ; in cluster_rbuild()
392 bp->b_iodone = cluster_callback; in cluster_rbuild()
393 bp->b_blkno = blkno; in cluster_rbuild()
394 bp->b_lblkno = lbn; in cluster_rbuild()
395 bp->b_offset = tbp->b_offset; in cluster_rbuild()
396 KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset")); in cluster_rbuild()
397 pbgetvp(vp, bp); in cluster_rbuild()
399 TAILQ_INIT(&bp->b_cluster.cluster_head); in cluster_rbuild()
401 bp->b_bcount = 0; in cluster_rbuild()
402 bp->b_bufsize = 0; in cluster_rbuild()
403 bp->b_npages = 0; in cluster_rbuild()
412 if ((bp->b_npages * PAGE_SIZE) + in cluster_rbuild()
499 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, in cluster_rbuild()
505 if ((bp->b_npages == 0) || in cluster_rbuild()
506 (bp->b_pages[bp->b_npages-1] != m)) { in cluster_rbuild()
507 bp->b_pages[bp->b_npages] = m; in cluster_rbuild()
508 bp->b_npages++; in cluster_rbuild()
523 bp->b_bcount += size; in cluster_rbuild()
524 bp->b_bufsize += size; in cluster_rbuild()
531 for (j = 0; j < bp->b_npages; j++) { in cluster_rbuild()
532 if (vm_page_all_valid(bp->b_pages[j])) in cluster_rbuild()
533 bp->b_pages[j] = bogus_page; in cluster_rbuild()
535 if (bp->b_bufsize > bp->b_kvasize) in cluster_rbuild()
537 bp->b_bufsize, bp->b_kvasize); in cluster_rbuild()
539 if (buf_mapped(bp)) { in cluster_rbuild()
540 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), in cluster_rbuild()
541 (vm_page_t *)bp->b_pages, bp->b_npages); in cluster_rbuild()
543 return (bp); in cluster_rbuild()
553 cluster_callback(struct buf *bp) in cluster_callback() argument
561 if (bp->b_ioflags & BIO_ERROR) in cluster_callback()
562 error = bp->b_error; in cluster_callback()
564 if (buf_mapped(bp)) { in cluster_callback()
565 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), in cluster_callback()
566 bp->b_npages); in cluster_callback()
572 for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head); in cluster_callback()
594 pbrelvp(bp); in cluster_callback()
595 uma_zfree(cluster_pbuf_zone, bp); in cluster_callback()
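cluster_rbuild() and cluster_callback() are the two halves of the cluster-buffer pattern visible above: a pseudo-buffer taken from cluster_pbuf_zone collects the component buffers on its b_cluster.cluster_head tail queue and aggregates their pages, and the completion callback walks that queue, finishes each component, then releases the pseudo-buffer with pbrelvp()/uma_zfree(). The sketch below reproduces only the queue mechanics, using the standard BSD <sys/queue.h> macros; the tbuf type and its fields are invented for the example.

/* Queue mechanics only; the struct and its field names are illustrative. */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct tbuf {
        int lblkno;                          /* logical block number */
        TAILQ_ENTRY(tbuf) b_cluster;         /* linkage on the parent's list */
};

TAILQ_HEAD(cluster_list, tbuf);

int
main(void)
{
        struct cluster_list head;
        struct tbuf *tbp, *next;
        int i;

        TAILQ_INIT(&head);                   /* cf. line 399 */
        for (i = 0; i < 4; i++) {
                tbp = malloc(sizeof(*tbp));
                tbp->lblkno = i;
                TAILQ_INSERT_TAIL(&head, tbp, b_cluster);    /* cf. line 499 */
        }
        /* "Completion": hand each component buffer back, cf. line 572. */
        for (tbp = TAILQ_FIRST(&head); tbp != NULL; tbp = next) {
                next = TAILQ_NEXT(tbp, b_cluster);
                printf("done, lblkno %d\n", tbp->lblkno);
                free(tbp);
        }
        return (0);
}

One detail from the rbuild lines: pages that are already fully valid are swapped for bogus_page before the transfer (lines 531-533), so the incoming data lands in a throwaway page instead of overwriting memory that is newer than the on-disk copy.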
641 cluster_write(struct vnode *vp, struct vn_clusterw *vnc, struct buf *bp, in cluster_write() argument
657 lblocksize = bp->b_bufsize; in cluster_write()
659 lbn = bp->b_lblkno; in cluster_write()
660 KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset")); in cluster_write()
667 (bp->b_blkno != vnc->v_lasta + btodb(lblocksize))) { in cluster_write()
687 if ((u_quad_t)bp->b_offset + lblocksize != filesize || in cluster_write()
697 buflist = cluster_collectbufs(vp, vnc, bp, in cluster_write()
704 bawrite(bp); in cluster_write()
736 vnc->v_lasta = bp->b_blkno; in cluster_write()
747 (u_quad_t) bp->b_offset + lblocksize != filesize && in cluster_write()
748 bp->b_blkno == bp->b_lblkno && in cluster_write()
749 (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, in cluster_write()
750 NULL) != 0 || bp->b_blkno == -1)) { in cluster_write()
751 pbn = bp->b_blkno; in cluster_write()
752 bawrite(bp); in cluster_write()
760 pbn = bp->b_blkno; in cluster_write()
763 bawrite(bp); in cluster_write()
766 bdwrite(bp); in cluster_write()
774 pbn = bp->b_blkno; in cluster_write()
775 bdwrite(bp); in cluster_write()
786 pbn = bp->b_blkno; in cluster_write()
787 bawrite(bp); in cluster_write()
792 pbn = bp->b_blkno; in cluster_write()
793 bdwrite(bp); in cluster_write()
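The cluster_write() matches revolve around one decision: whether to delay a dirty buffer with bdwrite() so it can later go out as part of a larger cluster, or to push it (and any accumulated cluster) out now with cluster_wbuild()/bawrite(). The key test is whether the buffer continues the current run, i.e. whether its disk address equals v_lasta plus one logical block's worth of disk blocks (line 667). The sketch below shows only that contiguity test; btodb_sketch() assumes 512-byte disk blocks (a shift of 9), which is an assumption made for the example, not something the listing states.

/* Illustrative contiguity check; 512-byte disk blocks are assumed. */
#include <stdio.h>

typedef long long daddr_sketch_t;

#define DEV_BSHIFT_SKETCH       9
#define btodb_sketch(bytes)     ((daddr_sketch_t)(bytes) >> DEV_BSHIFT_SKETCH)

/*
 * Does the new buffer continue the run started by the previous one?
 * lasta is the previous buffer's starting disk block, cf. v_lasta above.
 */
static int
continues_run(daddr_sketch_t blkno, daddr_sketch_t lasta, long lblocksize)
{
        return (blkno == lasta + btodb_sketch(lblocksize));
}

int
main(void)
{
        /* 16 KiB logical blocks are 32 disk blocks apart when contiguous. */
        printf("%d\n", continues_run(1032, 1000, 16384));      /* 1 */
        printf("%d\n", continues_run(2048, 1000, 16384));      /* 0 */
        return (0);
}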
809 struct buf *bp, *tbp; in cluster_wbuild() local
861 ((bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT)) == NULL)) { in cluster_wbuild()
868 MPASS((bp->b_flags & B_MAXPHYS) != 0); in cluster_wbuild()
874 TAILQ_INIT(&bp->b_cluster.cluster_head); in cluster_wbuild()
875 bp->b_bcount = 0; in cluster_wbuild()
876 bp->b_bufsize = 0; in cluster_wbuild()
877 bp->b_npages = 0; in cluster_wbuild()
879 bp->b_wcred = crhold(tbp->b_wcred); in cluster_wbuild()
881 bp->b_blkno = tbp->b_blkno; in cluster_wbuild()
882 bp->b_lblkno = tbp->b_lblkno; in cluster_wbuild()
883 bp->b_offset = tbp->b_offset; in cluster_wbuild()
893 bp->b_data = (char *)((vm_offset_t)bp->b_data | in cluster_wbuild()
896 bp->b_data = unmapped_buf; in cluster_wbuild()
898 bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO | in cluster_wbuild()
900 bp->b_iodone = cluster_callback; in cluster_wbuild()
901 pbgetvp(vp, bp); in cluster_wbuild()
935 (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) || in cluster_wbuild()
936 tbp->b_wcred != bp->b_wcred) { in cluster_wbuild()
947 ((bp->b_blkno + (dbsize * i)) != in cluster_wbuild()
949 ((tbp->b_npages + bp->b_npages) > in cluster_wbuild()
992 if ((bp->b_npages == 0) || in cluster_wbuild()
993 (bp->b_pages[bp->b_npages - 1] != m)) { in cluster_wbuild()
994 bp->b_pages[bp->b_npages] = m; in cluster_wbuild()
995 bp->b_npages++; in cluster_wbuild()
999 bp->b_bcount += size; in cluster_wbuild()
1000 bp->b_bufsize += size; in cluster_wbuild()
1006 bp->b_flags |= (tbp->b_flags & B_BARRIER); in cluster_wbuild()
1016 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, in cluster_wbuild()
1020 if (buf_mapped(bp)) { in cluster_wbuild()
1021 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), in cluster_wbuild()
1022 (vm_page_t *)bp->b_pages, bp->b_npages); in cluster_wbuild()
1024 if (bp->b_bufsize > bp->b_kvasize) in cluster_wbuild()
1027 bp->b_bufsize, bp->b_kvasize); in cluster_wbuild()
1028 totalwritten += bp->b_bufsize; in cluster_wbuild()
1029 bp->b_dirtyoff = 0; in cluster_wbuild()
1030 bp->b_dirtyend = bp->b_bufsize; in cluster_wbuild()
1031 bawrite(bp); in cluster_wbuild()
1047 struct buf *bp; in cluster_collectbufs() local
1058 gbflags, &bp); in cluster_collectbufs()
1069 buflist->bs_children[i] = bp; in cluster_collectbufs()
1070 if (bp->b_blkno == bp->b_lblkno) in cluster_collectbufs()
1071 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, in cluster_collectbufs()
1074 buflist->bs_children[i] = bp = last_bp; in cluster_collectbufs()
1075 if (bp->b_blkno == bp->b_lblkno) in cluster_collectbufs()
1076 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); in cluster_collectbufs()
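cluster_collectbufs() uses the same lazy-mapping idiom that appears in cluster_write(): a buffer whose b_blkno still equals its b_lblkno has not yet been translated to a device address, so VOP_BMAP() is called to fill it in before the buffer joins a cluster (lines 1070-1071). A minimal sketch of that idiom follows, with an invented translate() standing in for the filesystem's block-map lookup.

/* Lazy block-mapping sketch; translate() is a stand-in, not a kernel API. */
#include <stdio.h>

struct sbuf {
        long b_lblkno;          /* logical (file) block number */
        long b_blkno;           /* device block number, initially == b_lblkno */
};

/* Pretend filesystem layout: each logical block maps to 8 device blocks. */
static long
translate(long lblkno)
{
        return (100000 + lblkno * 8);
}

static void
map_if_needed(struct sbuf *bp)
{
        if (bp->b_blkno == bp->b_lblkno)        /* cf. lines 1070-1071 */
                bp->b_blkno = translate(bp->b_lblkno);
}

int
main(void)
{
        struct sbuf b = { 5, 5 };

        map_if_needed(&b);
        printf("lblkno %ld -> blkno %ld\n", b.b_lblkno, b.b_blkno);
        return (0);
}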