/*-
 * Copyright (c) 1993
 *        The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *        Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *        @(#)vfs_cluster.c        8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.85 1999/06/29 05:59:43 peter Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
        cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp));
static struct buf *
        cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn,
            daddr_t blkno, long size, int run, struct buf *fbp));

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32
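
/*
 * Illustrative sizing for the read-ahead limits computed in
 * cluster_read() below, assuming the common DFLTPHYS default of 64K
 * and an 8K filesystem block size (both are assumptions, not fixed
 * by this file): racluster = 65536 / 8192 = 8 blocks per cluster
 * I/O, and a 32K request gives maxra = 2 * 8 + 32768 / 8192 = 20
 * blocks of read-ahead, further clamped by MAXRA and nbuf / 8.
 */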

/*
 * This replaces bread.  If the requested block is already cached we
 * only decide whether more read-ahead is called for; otherwise we try
 * to read a whole cluster of contiguous blocks in one operation.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
        struct vnode *vp;
        u_quad_t filesize;
        daddr_t lblkno;
        long size;
        struct ucred *cred;
        long totread;
        int seqcount;
        struct buf **bpp;
{
        struct buf *bp, *rbp, *reqbp;
        daddr_t blkno, origblkno;
        int error, num_ra;
        int i;
        int maxra, racluster;
        long origtotread;

        error = 0;
        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;

        /*
         * Try to limit the amount of read-ahead by a few
         * ad-hoc parameters.  This needs work!!!
         */
        racluster = vp->v_maxio / size;
        maxra = 2 * racluster + (totread / size);
        if (maxra > MAXRA)
                maxra = MAXRA;
        if (maxra > nbuf / 8)
                maxra = nbuf / 8;

        /*
         * get the requested block
         */
        *bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
        origblkno = lblkno;
        origtotread = totread;

        /*
         * if it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back-off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                if (!seqcount) {
                        return 0;
                } else if ((bp->b_flags & B_RAM) == 0) {
                        return 0;
                } else {
                        int s;
                        struct buf *tbp;
                        bp->b_flags &= ~B_RAM;
                        /*
                         * We do the spl here so that there is no window
                         * between the incore and the buffer queue
                         * manipulation (currently disabled) below.  We
                         * opt to keep the spl out of the loop for
                         * efficiency.
                         */
                        s = splbio();
                        for (i = 1; i < maxra; i++) {

                                if (!(tbp = incore(vp, lblkno + i))) {
                                        break;
                                }

                                /*
                                 * Set another read-ahead mark so we know
                                 * to check again.
                                 */
                                if (((i % racluster) == (racluster - 1)) ||
                                    (i == (maxra - 1)))
                                        tbp->b_flags |= B_RAM;

#if 0
                                if ((tbp->b_usecount < 1) &&
                                    BUF_REFCNT(tbp) == 0 &&
                                    (tbp->b_qindex == QUEUE_LRU)) {
                                        TAILQ_REMOVE(&bufqueues[QUEUE_LRU], tbp, b_freelist);
                                        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], tbp, b_freelist);
                                }
#endif
                        }
                        splx(s);
                        if (i >= maxra) {
                                return 0;
                        }
                        lblkno += i;
                }
                reqbp = bp = NULL;
        } else {
                off_t firstread = bp->b_offset;

                KASSERT(bp->b_offset != NOOFFSET,
                    ("cluster_read: no buffer offset"));
                if (firstread + totread > filesize)
                        totread = filesize - firstread;
                if (totread > size) {
                        int nblks = 0;
                        int ncontigafter;
                        while (totread > 0) {
                                nblks++;
                                totread -= size;
                        }
                        if (nblks == 1)
                                goto single_block_read;
                        if (nblks > racluster)
                                nblks = racluster;

                        error = VOP_BMAP(vp, lblkno, NULL,
                            &blkno, &ncontigafter, NULL);
                        if (error)
                                goto single_block_read;
                        if (blkno == -1)
                                goto single_block_read;
                        if (ncontigafter == 0)
                                goto single_block_read;
                        if (ncontigafter + 1 < nblks)
                                nblks = ncontigafter + 1;

                        bp = cluster_rbuild(vp, filesize, lblkno,
                            blkno, size, nblks, bp);
                        lblkno += (bp->b_bufsize / size);
                } else {
single_block_read:
                        /*
                         * if it isn't in the cache, then get a chunk from
                         * disk if sequential, otherwise just get the block.
                         */
                        bp->b_flags |= B_READ | B_RAM;
                        lblkno += 1;
                }
        }
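
        /*
         * Summary of the state at this point (derived from the code
         * above): bp is NULL if the request was satisfied from the
         * cache, in which case only read-ahead, if any, remains to be
         * issued, and lblkno has been advanced past every block that
         * has already been handled.
         */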

        /*
         * if we have been doing sequential I/O, then do some read-ahead
         */
        rbp = NULL;
        if (seqcount && (lblkno < (origblkno + seqcount))) {
                /*
                 * we now build the read-ahead buffer if it is desirable.
                 */
                if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
                    !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
                    blkno != -1) {
                        int nblksread;
                        int ntoread = num_ra + 1;
                        nblksread = (origtotread + size - 1) / size;
                        if (seqcount < nblksread)
                                seqcount = nblksread;
                        if (seqcount < ntoread)
                                ntoread = seqcount;
                        if (num_ra) {
                                rbp = cluster_rbuild(vp, filesize, lblkno,
                                    blkno, size, ntoread, NULL);
                        } else {
                                rbp = getblk(vp, lblkno, size, 0, 0);
                                rbp->b_flags |= B_READ | B_ASYNC | B_RAM;
                                rbp->b_blkno = blkno;
                        }
                }
        }

        /*
         * handle the synchronous read
         */
        if (bp) {
#if defined(CLUSTERDEBUG)
                if (rcluster)
                        printf("S(%ld,%ld,%d) ",
                            (long)bp->b_lblkno, bp->b_bcount, seqcount);
#endif
                if ((bp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(bp, 0);
                bp->b_flags &= ~(B_ERROR|B_INVAL);
                if (bp->b_flags & (B_ASYNC|B_CALL))
                        BUF_KERNPROC(bp);
                error = VOP_STRATEGY(vp, bp);
                curproc->p_stats->p_ru.ru_inblock++;
        }

        /*
         * and if we have read-aheads, do them too
         */
        if (rbp) {
                if (error) {
                        rbp->b_flags &= ~(B_ASYNC | B_READ);
                        brelse(rbp);
                } else if (rbp->b_flags & B_CACHE) {
                        rbp->b_flags &= ~(B_ASYNC | B_READ);
                        bqrelse(rbp);
                } else {
#if defined(CLUSTERDEBUG)
                        if (rcluster) {
                                if (bp)
                                        printf("A+(%ld,%ld,%ld,%d) ",
                                            (long)rbp->b_lblkno, rbp->b_bcount,
                                            (long)(rbp->b_lblkno - origblkno),
                                            seqcount);
                                else
                                        printf("A(%ld,%ld,%ld,%d) ",
                                            (long)rbp->b_lblkno, rbp->b_bcount,
                                            (long)(rbp->b_lblkno - origblkno),
                                            seqcount);
                        }
#endif

                        if ((rbp->b_flags & B_CLUSTER) == 0)
                                vfs_busy_pages(rbp, 0);
                        rbp->b_flags &= ~(B_ERROR|B_INVAL);
                        if (rbp->b_flags & (B_ASYNC|B_CALL))
                                BUF_KERNPROC(rbp);
                        (void) VOP_STRATEGY(vp, rbp);
                        curproc->p_stats->p_ru.ru_inblock++;
                }
        }
        if (reqbp)
                return (biowait(reqbp));
        else
                return (error);
}
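
/*
 * A sketch of the VOP_BMAP() contract the read path above relies on
 * (lbn and ncontig are illustrative names): given a logical block it
 * returns the physical block number and, through the run pointer, how
 * many logically consecutive blocks follow it contiguously on disk:
 *
 *        error = VOP_BMAP(vp, lbn, NULL, &blkno, &ncontig, NULL);
 *
 * On success, blocks lbn .. lbn + ncontig can be fetched as a single
 * contiguous disk transfer starting at blkno; cluster_rbuild() below
 * assembles that one transfer out of the individual buffers.
 */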

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
        struct vnode *vp;
        u_quad_t filesize;
        daddr_t lbn;
        daddr_t blkno;
        long size;
        int run;
        struct buf *fbp;
{
        struct buf *bp, *tbp;
        daddr_t bn;
        int i, inc, j;

        KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
            ("cluster_rbuild: size %ld != f_iosize %ld\n",
            size, vp->v_mount->mnt_stat.f_iosize));

        /*
         * avoid a division
         */
        while ((u_quad_t) size * (lbn + run) > filesize) {
                --run;
        }

        if (fbp) {
                tbp = fbp;
                tbp->b_flags |= B_READ;
        } else {
                tbp = getblk(vp, lbn, size, 0, 0);
                if (tbp->b_flags & B_CACHE)
                        return tbp;
                tbp->b_flags |= B_ASYNC | B_READ | B_RAM;
        }

        tbp->b_blkno = blkno;
        if ((tbp->b_flags & B_MALLOC) ||
            ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
                return tbp;

        bp = trypbuf(&cluster_pbuf_freecnt);
        if (bp == NULL)
                return tbp;

        bp->b_data = (char *)((vm_offset_t)bp->b_data |
            ((vm_offset_t)tbp->b_data & PAGE_MASK));
        bp->b_flags = B_ASYNC | B_READ | B_CALL | B_CLUSTER | B_VMIO;
        bp->b_iodone = cluster_callback;
        bp->b_blkno = blkno;
        bp->b_lblkno = lbn;
        bp->b_offset = tbp->b_offset;
        KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
        pbgetvp(vp, bp);

        TAILQ_INIT(&bp->b_cluster.cluster_head);

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_npages = 0;

        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;
        inc = btodb(size);
        for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
                if (i != 0) {
                        if ((bp->b_npages * PAGE_SIZE) +
                            round_page(size) > vp->v_maxio)
                                break;

                        if ((tbp = incore(vp, lbn + i)) != NULL) {
                                if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT))
                                        break;
                                BUF_UNLOCK(tbp);

                                for (j = 0; j < tbp->b_npages; j++)
                                        if (tbp->b_pages[j]->valid)
                                                break;

                                if (j != tbp->b_npages)
                                        break;

                                if (tbp->b_bcount != size)
                                        break;
                        }

                        tbp = getblk(vp, lbn + i, size, 0, 0);

                        if ((tbp->b_flags & B_CACHE) ||
                            (tbp->b_flags & B_VMIO) == 0) {
                                bqrelse(tbp);
                                break;
                        }

                        for (j = 0; j < tbp->b_npages; j++)
                                if (tbp->b_pages[j]->valid)
                                        break;

                        if (j != tbp->b_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        if ((fbp && (i == 1)) || (i == (run - 1)))
                                tbp->b_flags |= B_RAM;
                        tbp->b_flags |= B_READ | B_ASYNC;
                        if (tbp->b_blkno == tbp->b_lblkno) {
                                tbp->b_blkno = bn;
                        } else if (tbp->b_blkno != bn) {
                                brelse(tbp);
                                break;
                        }
                }
                /*
                 * XXX fbp from caller may not be B_ASYNC, but we are going
                 * to biodone() it in cluster_callback() anyway
                 */
                BUF_KERNPROC(tbp);
                TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                    tbp, b_cluster.cluster_entry);
                for (j = 0; j < tbp->b_npages; j += 1) {
                        vm_page_t m;
                        m = tbp->b_pages[j];
                        vm_page_io_start(m);
                        vm_object_pip_add(m->object, 1);
                        if ((bp->b_npages == 0) ||
                            (bp->b_pages[bp->b_npages - 1] != m)) {
                                bp->b_pages[bp->b_npages] = m;
                                bp->b_npages++;
                        }
                        if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
                                tbp->b_pages[j] = bogus_page;
                }
                bp->b_bcount += tbp->b_bcount;
                bp->b_bufsize += tbp->b_bufsize;
        }

        for (j = 0; j < bp->b_npages; j++) {
                if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
                    VM_PAGE_BITS_ALL)
                        bp->b_pages[j] = bogus_page;
        }
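
        /*
         * Note on the bogus_page substitutions above: pages that are
         * already fully valid do not need data from the disk, so the
         * cluster buffer maps bogus_page in their place.  The incoming
         * transfer then scribbles on the throwaway page for those
         * offsets while the real, valid pages are left untouched.
         */
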
        if (bp->b_bufsize > bp->b_kvasize)
                panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
                    bp->b_bufsize, bp->b_kvasize);
        bp->b_kvasize = bp->b_bufsize;

        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
            (vm_page_t *)bp->b_pages, bp->b_npages);
        return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
        struct buf *bp;
{
        struct buf *nbp, *tbp;
        int error = 0;

        /*
         * Must propagate errors to all the components.
         */
        if (bp->b_flags & B_ERROR)
                error = bp->b_error;

        pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.
         */
        for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
            tbp; tbp = nbp) {
                nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
                if (error) {
                        tbp->b_flags |= B_ERROR;
                        tbp->b_error = error;
                } else {
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                        tbp->b_flags &= ~(B_ERROR|B_INVAL);
                }
                biodone(tbp);
        }
        relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *        cluster_wbuild_wb:
 *
 *        Implement modified write build for cluster.  The policy is
 *        run-time tunable through the vfs.write_behind sysctl declared
 *        above.
 *
 *                write_behind = 0        write behind disabled
 *                write_behind = 1        write behind normal (default)
 *                write_behind = 2        write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
        int r = 0;

        switch(write_behind) {
        case 2:
                if (start_lbn < len)
                        break;
                start_lbn -= len;
                /* FALLTHROUGH */
        case 1:
                r = cluster_wbuild(vp, size, start_lbn, len);
                break;
        default:
                break;
        }
        return(r);
}
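
/*
 * Typical use of cluster_write(), sketched from a filesystem write
 * path (ffs_write-style; doclusterwrite, ioflag and ip are
 * illustrative names, not defined in this file):
 *
 *        if (ioflag & IO_SYNC)
 *                bwrite(bp);
 *        else if (doclusterwrite)
 *                cluster_write(bp, ip->i_size);
 *        else
 *                bdwrite(bp);
 */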

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *        1. Write is not sequential (write asynchronously)
 *        Write is sequential:
 *        2.        beginning of cluster - begin cluster
 *        3.        middle of a cluster - add to cluster
 *        4.        end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
        struct buf *bp;
        u_quad_t filesize;
{
        struct vnode *vp;
        daddr_t lbn;
        int maxclen, cursize;
        int lblocksize;
        int async;

        vp = bp->b_vp;
        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;
        if (vp->v_type == VREG) {
                async = vp->v_mount->mnt_flag & MNT_ASYNC;
                lblocksize = vp->v_mount->mnt_stat.f_iosize;
        } else {
                async = 0;
                lblocksize = bp->b_bufsize;
        }
        lbn = bp->b_lblkno;
        KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

        /* Initialize vnode to beginning of file. */
        if (lbn == 0)
                vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

        if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
            (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
                maxclen = vp->v_maxio / lblocksize - 1;
                if (vp->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at end of file, the process
                         * seeked to another point in the file since its last
                         * write, or we have reached our maximum cluster size,
                         * then push the previous cluster.  Otherwise try
                         * reallocating to make it sequential.
                         */
                        cursize = vp->v_lastw - vp->v_cstart + 1;
                        if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
                            lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
                                if (!async)
                                        cluster_wbuild(vp, lblocksize,
                                            vp->v_cstart, cursize);
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(vp, bp);
                                endbp = &buflist->bs_children
                                    [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                            bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        free(buflist, M_SEGMENT);
                                        cluster_wbuild_wb(vp, lblocksize,
                                            vp->v_cstart, cursize);
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                            bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        free(buflist, M_SEGMENT);
                                        vp->v_lastw = lbn;
                                        vp->v_lasta = bp->b_blkno;
                                        return;
                                }
                        }
                }
                /*
                 * Consider beginning a cluster.  If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
                    (bp->b_blkno == bp->b_lblkno) &&
                    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
                    bp->b_blkno == -1)) {
                        bawrite(bp);
                        vp->v_clen = 0;
                        vp->v_lasta = bp->b_blkno;
                        vp->v_cstart = lbn + 1;
                        vp->v_lastw = lbn;
                        return;
                }
                vp->v_clen = maxclen;
                if (!async && maxclen == 0) {        /* I/O not contiguous */
                        vp->v_cstart = lbn + 1;
                        bawrite(bp);
                } else {        /* Wait for rest of cluster */
                        vp->v_cstart = lbn;
                        bdwrite(bp);
                }
        } else if (lbn == vp->v_cstart + vp->v_clen) {
                /*
                 * At end of cluster, write it out.
                 */
                bdwrite(bp);
                cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
                vp->v_clen = 0;
                vp->v_cstart = lbn + 1;
        } else
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        vp->v_lastw = lbn;
        vp->v_lasta = bp->b_blkno;
}
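
/*
 * A worked example of the bookkeeping above, assuming a strictly
 * sequential writer starting at block 0 and a VOP_BMAP() run of, say,
 * 3 more contiguous blocks (maxclen = 3): block 0 starts a cluster
 * (v_cstart = 0, v_clen = 3) and is delayed; blocks 1 and 2 are
 * middle blocks and are simply delayed too; block 3 == v_cstart +
 * v_clen ends the cluster, pushing blocks 0-3 as a single write and
 * restarting at v_cstart = 4.
 */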

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Write out up to len blocks' worth of dirty buffers starting at
 * start_lbn, combining contiguous delayed-write buffers into single
 * large I/Os where possible.
 */
int
cluster_wbuild(vp, size, start_lbn, len)
        struct vnode *vp;
        long size;
        daddr_t start_lbn;
        int len;
{
        struct buf *bp, *tbp;
        int i, j, s;
        int totalwritten = 0;
        int dbsize = btodb(size);

        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;
        while (len > 0) {
                s = splbio();
                if (((tbp = gbincore(vp, start_lbn)) == NULL) ||
                    ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) ||
                    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
                        ++start_lbn;
                        --len;
                        splx(s);
                        continue;
                }
                bremfree(tbp);
                tbp->b_flags &= ~B_DONE;
                splx(s);

                /*
                 * Extra memory in the buffer, punt on this buffer.
                 * XXX we could handle this in most cases, but we would
                 * have to push the extra memory down to after our max
                 * possible cluster size and then potentially pull it back
                 * up if the cluster was terminated prematurely--too much
                 * hassle.
                 */
                if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
                    (tbp->b_bcount != tbp->b_bufsize) ||
                    (tbp->b_bcount != size) ||
                    (len == 1) ||
                    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        ++start_lbn;
                        --len;
                        continue;
                }

                /*
                 * We got a pbuf to make the cluster in, so initialize it.
                 */
                TAILQ_INIT(&bp->b_cluster.cluster_head);
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_npages = 0;
                if (tbp->b_wcred != NOCRED) {
                        bp->b_wcred = tbp->b_wcred;
                        crhold(bp->b_wcred);
                }

                bp->b_blkno = tbp->b_blkno;
                bp->b_lblkno = tbp->b_lblkno;
                bp->b_offset = tbp->b_offset;
                bp->b_data = (char *)((vm_offset_t)bp->b_data |
                    ((vm_offset_t)tbp->b_data & PAGE_MASK));
                bp->b_flags |= B_CALL | B_CLUSTER |
                    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
                bp->b_iodone = cluster_callback;
                pbgetvp(vp, bp);
                /*
                 * From this location in the file, scan forward to see
                 * if there are buffers with adjacent data that need to
                 * be written as well.
                 */
                for (i = 0; i < len; ++i, ++start_lbn) {
                        if (i != 0) { /* If not the first buffer */
                                s = splbio();
                                /*
                                 * If the adjacent data is not even in core it
                                 * can't need to be written.
                                 */
                                if ((tbp = gbincore(vp, start_lbn)) == NULL) {
                                        splx(s);
                                        break;
                                }

                                /*
                                 * If it IS in core, but has different
                                 * characteristics, don't cluster with it.
                                 */
                                if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                                    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
                                    != (B_DELWRI | B_CLUSTEROK |
                                    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
                                    tbp->b_wcred != bp->b_wcred ||
                                    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
                                        splx(s);
                                        break;
                                }

                                /*
                                 * Check that the combined cluster
                                 * would make sense with regard to pages
                                 * and would not be too large.
                                 */
                                if ((tbp->b_bcount != size) ||
                                    ((bp->b_blkno + (dbsize * i)) !=
                                    tbp->b_blkno) ||
                                    ((tbp->b_npages + bp->b_npages) >
                                    (vp->v_maxio / PAGE_SIZE))) {
                                        BUF_UNLOCK(tbp);
                                        splx(s);
                                        break;
                                }
                                /*
                                 * Ok, it's passed all the tests,
                                 * so remove it from the free list
                                 * and mark it busy.  We will use it.
                                 */
                                bremfree(tbp);
                                tbp->b_flags &= ~B_DONE;
                                splx(s);
                        } /* end of code for non-first buffers only */
                        /* check for latent dependencies to be handled */
                        if ((LIST_FIRST(&tbp->b_dep)) != NULL &&
                            bioops.io_start)
                                (*bioops.io_start)(tbp);
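
                        /*
                         * The io_start hook above is how dependency
                         * tracking code gets a chance to resolve or
                         * roll back its bookkeeping before the buffer
                         * goes to disk; with soft updates in the
                         * kernel this points at
                         * softdep_disk_io_initiation().
                         */
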
                        /*
                         * If the IO is via the VM then we do some
                         * special VM hackery. (yuck)
                         */
                        if (tbp->b_flags & B_VMIO) {
                                vm_page_t m;

                                if (i != 0) { /* if not first buffer */
                                        for (j = 0; j < tbp->b_npages; j += 1) {
                                                m = tbp->b_pages[j];
                                                if (m->flags & PG_BUSY)
                                                        goto finishcluster;
                                        }
                                }

                                for (j = 0; j < tbp->b_npages; j += 1) {
                                        m = tbp->b_pages[j];
                                        vm_page_io_start(m);
                                        vm_object_pip_add(m->object, 1);
                                        if ((bp->b_npages == 0) ||
                                            (bp->b_pages[bp->b_npages - 1] != m)) {
                                                bp->b_pages[bp->b_npages] = m;
                                                bp->b_npages++;
                                        }
                                }
                        }
                        bp->b_bcount += size;
                        bp->b_bufsize += size;

                        s = splbio();
                        bundirty(tbp);
                        tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
                        tbp->b_flags |= B_ASYNC;
                        reassignbuf(tbp, tbp->b_vp);        /* put on clean list */
                        ++tbp->b_vp->v_numoutput;
                        splx(s);
                        BUF_KERNPROC(tbp);
                        TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                            tbp, b_cluster.cluster_entry);
                }
        finishcluster:
                pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                    (vm_page_t *) bp->b_pages, bp->b_npages);
                if (bp->b_bufsize > bp->b_kvasize)
                        panic(
                            "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
                            bp->b_bufsize, bp->b_kvasize);
                bp->b_kvasize = bp->b_bufsize;
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bawrite(bp);

                len -= i;
        }
        return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
        struct vnode *vp;
        struct buf *last_bp;
{
        struct cluster_save *buflist;
        struct buf *bp;
        daddr_t lbn;
        int i, len;

        len = vp->v_lastw - vp->v_cstart + 1;
        buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
            M_SEGMENT, M_WAITOK);
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
                (void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
                buflist->bs_children[i] = bp;
                if (bp->b_blkno == bp->b_lblkno)
                        VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
                            NULL, NULL);
        }
        buflist->bs_children[i] = bp = last_bp;
        if (bp->b_blkno == bp->b_lblkno)
                VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
                    NULL, NULL);
        buflist->bs_nchildren = i + 1;
        return (buflist);
}
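
/*
 * Layout note for the cluster_collectbufs() allocation above: a
 * single malloc() carries both the header and the child pointer
 * array back to back,
 *
 *        struct cluster_save | struct buf *bs_children[len + 1]
 *
 * which is why bs_children is simply pointed at (buflist + 1).
 */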