/*-
 * Copyright (c) 1993
 *      The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.59 1998/03/16 18:39:41 julian Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
#include <sys/kernel.h>
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

#ifdef notyet_block_reallocation_enabled
static struct cluster_save *
        cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp));
#endif
static struct buf *
        cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn,
                daddr_t blkno, long size, int run, struct buf *fbp));

extern vm_page_t bogus_page;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.
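 *
 * As a hedged sketch of the calling convention (the surrounding variable
 * names here are illustrative, not defined in this file), a filesystem
 * read path might replace its bread() call with:
 *
 *      error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *          uio->uio_resid, seqcount, &bp);
 *
 * On return *bpp holds the buffer for lblkno; blocks that are contiguous
 * on disk may be read in the same physical I/O and entered into the
 * buffer cache alongside it.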
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
        struct vnode *vp;
        u_quad_t filesize;
        daddr_t lblkno;
        long size;
        struct ucred *cred;
        long totread;
        int seqcount;
        struct buf **bpp;
{
        struct buf *bp, *rbp, *reqbp;
        daddr_t blkno, origblkno;
        int error, num_ra;
        int i;
        int maxra, racluster;
        long origtotread;

        error = 0;
        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;

        /*
         * Try to limit the amount of read-ahead by a few
         * ad-hoc parameters.  This needs work!!!
         */
        racluster = vp->v_maxio / size;
        maxra = 2 * racluster + (totread / size);
        if (maxra > MAXRA)
                maxra = MAXRA;
        if (maxra > nbuf / 8)
                maxra = nbuf / 8;

        /*
         * get the requested block
         */
        *bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
        origblkno = lblkno;
        origtotread = totread;

        /*
         * if it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back-off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                if (!seqcount) {
                        return 0;
                } else if ((bp->b_flags & B_RAM) == 0) {
                        return 0;
                } else {
                        int s;
                        struct buf *tbp;

                        bp->b_flags &= ~B_RAM;
                        /*
                         * We do the spl here so that there is no window
                         * between the incore and the b_usecount increment
                         * below.  We opt to keep the spl out of the loop
                         * for efficiency.
                         */
                        s = splbio();
                        for (i = 1; i < maxra; i++) {
                                if (!(tbp = incore(vp, lblkno + i))) {
                                        break;
                                }

                                /*
                                 * Set another read-ahead mark so we know
                                 * to check again.
                                 */
                                if (((i % racluster) == (racluster - 1)) ||
                                    (i == (maxra - 1)))
                                        tbp->b_flags |= B_RAM;

                                if ((tbp->b_usecount < 1) &&
                                    ((tbp->b_flags & B_BUSY) == 0) &&
                                    (tbp->b_qindex == QUEUE_LRU)) {
                                        TAILQ_REMOVE(&bufqueues[QUEUE_LRU], tbp, b_freelist);
                                        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], tbp, b_freelist);
                                }
                        }
                        splx(s);
                        if (i >= maxra) {
                                return 0;
                        }
                        lblkno += i;
                }
                reqbp = bp = NULL;
        } else {
                off_t firstread;

                firstread = bp->b_offset;
#ifdef DIAGNOSTIC
                if (bp->b_offset == NOOFFSET)
                        panic("cluster_read: no buffer offset");
#endif
                if (firstread + totread > filesize)
                        totread = filesize - firstread;
                if (totread > size) {
                        int nblks = 0;
                        int ncontigafter;

                        while (totread > 0) {
                                nblks++;
                                totread -= size;
                        }
                        if (nblks == 1)
                                goto single_block_read;
                        if (nblks > racluster)
                                nblks = racluster;

                        error = VOP_BMAP(vp, lblkno, NULL,
                            &blkno, &ncontigafter, NULL);
                        if (error)
                                goto single_block_read;
                        if (blkno == -1)
                                goto single_block_read;
                        if (ncontigafter == 0)
                                goto single_block_read;
                        if (ncontigafter + 1 < nblks)
                                nblks = ncontigafter + 1;

                        bp = cluster_rbuild(vp, filesize, lblkno,
                            blkno, size, nblks, bp);
                        lblkno += (bp->b_bufsize / size);
                } else {
single_block_read:
                        /*
                         * if it isn't in the cache, then get a chunk from
                         * disk if sequential, otherwise just get the block.
                         */
                        bp->b_flags |= B_READ | B_RAM;
                        lblkno += 1;
                }
        }

        /*
         * if we have been doing sequential I/O, then do some read-ahead
         */
        rbp = NULL;
        if (seqcount && (lblkno < (origblkno + seqcount))) {
                /*
                 * we now build the read-ahead buffer if it is desirable.
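                 *
                 * "Desirable" means the next logical block still lies
                 * within the file, VOP_BMAP can translate it, and it has
                 * a real disk address; ntoread is then clamped by both
                 * the on-disk contiguity (num_ra) and the sequential-
                 * access estimate (seqcount).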
                 */
                if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
                    !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
                    blkno != -1) {
                        int nblksread;
                        int ntoread = num_ra + 1;

                        nblksread = (origtotread + size - 1) / size;
                        if (seqcount < nblksread)
                                seqcount = nblksread;
                        if (seqcount < ntoread)
                                ntoread = seqcount;
                        if (num_ra) {
                                rbp = cluster_rbuild(vp, filesize, lblkno,
                                    blkno, size, ntoread, NULL);
                        } else {
                                rbp = getblk(vp, lblkno, size, 0, 0);
                                rbp->b_flags |= B_READ | B_ASYNC | B_RAM;
                                rbp->b_blkno = blkno;
                        }
                }
        }

        /*
         * handle the synchronous read
         */
        if (bp) {
                if (bp->b_flags & (B_DONE | B_DELWRI)) {
                        panic("cluster_read: DONE bp");
                } else {
#if defined(CLUSTERDEBUG)
                        if (rcluster)
                                printf("S(%d,%d,%d) ",
                                    bp->b_lblkno, bp->b_bcount, seqcount);
#endif
                        if ((bp->b_flags & B_CLUSTER) == 0)
                                vfs_busy_pages(bp, 0);
                        error = VOP_STRATEGY(bp);
                        curproc->p_stats->p_ru.ru_inblock++;
                }
        }

        /*
         * and if we have read-aheads, do them too
         */
        if (rbp) {
                if (error) {
                        rbp->b_flags &= ~(B_ASYNC | B_READ);
                        brelse(rbp);
                } else if (rbp->b_flags & B_CACHE) {
                        rbp->b_flags &= ~(B_ASYNC | B_READ);
                        bqrelse(rbp);
                } else {
#if defined(CLUSTERDEBUG)
                        if (rcluster) {
                                if (bp)
                                        printf("A+(%d,%d,%d,%d) ",
                                            rbp->b_lblkno, rbp->b_bcount,
                                            rbp->b_lblkno - origblkno,
                                            seqcount);
                                else
                                        printf("A(%d,%d,%d,%d) ",
                                            rbp->b_lblkno, rbp->b_bcount,
                                            rbp->b_lblkno - origblkno,
                                            seqcount);
                        }
#endif
                        if ((rbp->b_flags & B_CLUSTER) == 0)
                                vfs_busy_pages(rbp, 0);
                        (void) VOP_STRATEGY(rbp);
                        curproc->p_stats->p_ru.ru_inblock++;
                }
        }
        if (reqbp)
                return (biowait(reqbp));
        else
                return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
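 *
 * The result is either the original component buffer (when clustering is
 * not possible) or a pbuf whose page array aggregates the pages of all
 * component buffers; cluster_callback() later fans completion back out
 * to each component.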
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
        struct vnode *vp;
        u_quad_t filesize;
        daddr_t lbn;
        daddr_t blkno;
        long size;
        int run;
        struct buf *fbp;
{
        struct buf *bp, *tbp;
        daddr_t bn;
        int i, inc, j;

#ifdef DIAGNOSTIC
        if (size != vp->v_mount->mnt_stat.f_iosize)
                panic("cluster_rbuild: size %ld != f_iosize %ld\n",
                    size, vp->v_mount->mnt_stat.f_iosize);
#endif
        /*
         * avoid a division
         */
        while ((u_quad_t) size * (lbn + run) > filesize) {
                --run;
        }

        if (fbp) {
                tbp = fbp;
                tbp->b_flags |= B_READ;
        } else {
                tbp = getblk(vp, lbn, size, 0, 0);
                if (tbp->b_flags & B_CACHE)
                        return tbp;
                tbp->b_flags |= B_ASYNC | B_READ | B_RAM;
        }

        tbp->b_blkno = blkno;
        if ((tbp->b_flags & B_MALLOC) ||
            ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
                return tbp;

        bp = trypbuf();
        if (bp == NULL)
                return tbp;

        (vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
        bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
        bp->b_iodone = cluster_callback;
        bp->b_blkno = blkno;
        bp->b_lblkno = lbn;
        bp->b_offset = tbp->b_offset;
#ifdef DIAGNOSTIC
        if (bp->b_offset == NOOFFSET)
                panic("cluster_rbuild: no buffer offset");
#endif
        pbgetvp(vp, bp);

        TAILQ_INIT(&bp->b_cluster.cluster_head);

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_npages = 0;

        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;
        inc = btodb(size);
        for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
                if (i != 0) {
                        if ((bp->b_npages * PAGE_SIZE) +
                            round_page(size) > vp->v_maxio)
                                break;

                        if ((tbp = incore(vp, lbn + i)) != NULL) {
                                if (tbp->b_flags & B_BUSY)
                                        break;

                                for (j = 0; j < tbp->b_npages; j++)
                                        if (tbp->b_pages[j]->valid)
                                                break;

                                if (j != tbp->b_npages)
                                        break;

                                if (tbp->b_bcount != size)
                                        break;
                        }

                        tbp = getblk(vp, lbn + i, size, 0, 0);

                        if ((tbp->b_flags & B_CACHE) ||
                            (tbp->b_flags & B_VMIO) == 0) {
                                bqrelse(tbp);
                                break;
                        }

                        for (j = 0; j < tbp->b_npages; j++)
                                if (tbp->b_pages[j]->valid)
                                        break;

                        if (j != tbp->b_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        if ((fbp && (i == 1)) || (i == (run - 1)))
                                tbp->b_flags |= B_RAM;
                        tbp->b_flags |= B_READ | B_ASYNC;
                        if (tbp->b_blkno == tbp->b_lblkno) {
                                tbp->b_blkno = bn;
                        } else if (tbp->b_blkno != bn) {
                                brelse(tbp);
                                break;
                        }
                }
                TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                    tbp, b_cluster.cluster_entry);
                for (j = 0; j < tbp->b_npages; j += 1) {
                        vm_page_t m;

                        m = tbp->b_pages[j];
                        ++m->busy;
                        ++m->object->paging_in_progress;
                        if ((bp->b_npages == 0) ||
                            (bp->b_pages[bp->b_npages - 1] != m)) {
                                bp->b_pages[bp->b_npages] = m;
                                bp->b_npages++;
                        }
                        if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
                                tbp->b_pages[j] = bogus_page;
                }
                bp->b_bcount += tbp->b_bcount;
                bp->b_bufsize += tbp->b_bufsize;
        }

        for (j = 0; j < bp->b_npages; j++) {
                if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
                    VM_PAGE_BITS_ALL)
                        bp->b_pages[j] = bogus_page;
        }
        if (bp->b_bufsize > bp->b_kvasize)
                panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
                    bp->b_bufsize, bp->b_kvasize);
        bp->b_kvasize = bp->b_bufsize;

        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
            (vm_page_t *)bp->b_pages, bp->b_npages);
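
        /*
         * At this point the pbuf's KVA maps all of the collected pages
         * contiguously, so the driver sees one linear buffer covering
         * the whole cluster.
         */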
        return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
        struct buf *bp;
{
        struct buf *nbp, *tbp;
        int error = 0;

        /*
         * Must propagate errors to all the components.
         */
        if (bp->b_flags & B_ERROR)
                error = bp->b_error;

        pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.
         */
        for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
            tbp; tbp = nbp) {
                nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
                if (error) {
                        tbp->b_flags |= B_ERROR;
                        tbp->b_error = error;
                } else
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                biodone(tbp);
        }
        relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *      1.      Write is not sequential (write asynchronously)
 *      Write is sequential:
 *      2.      beginning of cluster - begin cluster
 *      3.      middle of a cluster - add to cluster
 *      4.      end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
        struct buf *bp;
        u_quad_t filesize;
{
        struct vnode *vp;
        daddr_t lbn;
        int maxclen, cursize;
        int lblocksize;
        int async;

        vp = bp->b_vp;
        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;
        if (vp->v_type == VREG) {
                async = vp->v_mount->mnt_flag & MNT_ASYNC;
                lblocksize = vp->v_mount->mnt_stat.f_iosize;
        } else {
                async = 0;
                lblocksize = bp->b_bufsize;
        }
        lbn = bp->b_lblkno;

#ifdef DIAGNOSTIC
        if (bp->b_offset == NOOFFSET)
                panic("cluster_write: no buffer offset");
#endif

        /* Initialize vnode to beginning of file. */
        if (lbn == 0)
                vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

        if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
            (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
                maxclen = vp->v_maxio / lblocksize - 1;
                if (vp->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at end of file, the process
                         * seeked to another point in the file since its last
                         * write, or we have reached our maximum cluster size,
                         * then push the previous cluster.  Otherwise try
                         * reallocating to make it sequential.
                         */
                        cursize = vp->v_lastw - vp->v_cstart + 1;
#ifndef notyet_block_reallocation_enabled
                        if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
                            lbn != vp->v_lastw + 1 ||
                            vp->v_clen <= cursize) {
                                if (!async)
                                        cluster_wbuild(vp, lblocksize,
                                            vp->v_cstart, cursize);
                        }
#else
                        if ((lbn + 1) * lblocksize != filesize ||
                            lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
                                if (!async)
                                        cluster_wbuild(vp, lblocksize,
                                            vp->v_cstart, cursize);
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(vp, bp);
                                endbp = &buflist->bs_children
                                    [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster.
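                                         * Release every collected buffer
                                         * except the last; the last child
                                         * is the caller's own bp, which
                                         * remains the caller's
                                         * responsibility.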
                                         */
                                        for (bpp = buflist->bs_children;
                                            bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        free(buflist, M_SEGMENT);
                                        cluster_wbuild(vp, lblocksize,
                                            vp->v_cstart, cursize);
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                            bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        free(buflist, M_SEGMENT);
                                        vp->v_lastw = lbn;
                                        vp->v_lasta = bp->b_blkno;
                                        return;
                                }
                        }
#endif /* notyet_block_reallocation_enabled */
                }
                /*
                 * Consider beginning a cluster.  If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
                    (bp->b_blkno == bp->b_lblkno) &&
                    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
                     bp->b_blkno == -1)) {
                        bawrite(bp);
                        vp->v_clen = 0;
                        vp->v_lasta = bp->b_blkno;
                        vp->v_cstart = lbn + 1;
                        vp->v_lastw = lbn;
                        return;
                }
                vp->v_clen = maxclen;
                if (!async && maxclen == 0) {   /* I/O not contiguous */
                        vp->v_cstart = lbn + 1;
                        bawrite(bp);
                } else {        /* Wait for rest of cluster */
                        vp->v_cstart = lbn;
                        bdwrite(bp);
                }
        } else if (lbn == vp->v_cstart + vp->v_clen) {
                /*
                 * At end of cluster, write it out.
                 */
                bdwrite(bp);
                cluster_wbuild(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
                vp->v_clen = 0;
                vp->v_cstart = lbn + 1;
        } else
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        vp->v_lastw = lbn;
        vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Gather the delayed-write buffers in the range [start_lbn, start_lbn + len)
 * into clusters and push them out as asynchronous writes, returning the
 * total number of bytes written.
 */
int
cluster_wbuild(vp, size, start_lbn, len)
        struct vnode *vp;
        long size;
        daddr_t start_lbn;
        int len;
{
        struct buf *bp, *tbp;
        int i, j, s;
        int totalwritten = 0;
        int dbsize = btodb(size);

        while (len > 0) {
                s = splbio();
                if (((tbp = gbincore(vp, start_lbn)) == NULL) ||
                    ((tbp->b_flags & (B_INVAL | B_BUSY | B_DELWRI)) != B_DELWRI)) {
                        ++start_lbn;
                        --len;
                        splx(s);
                        continue;
                }
                bremfree(tbp);
                tbp->b_flags |= B_BUSY;
                tbp->b_flags &= ~B_DONE;
                splx(s);

                /*
                 * Extra memory in the buffer, punt on this buffer.
                 * XXX we could handle this in most cases, but we would
                 * have to push the extra memory down to after our max
                 * possible cluster size and then potentially pull it back
                 * up if the cluster was terminated prematurely--too much
                 * hassle.
                 */
                if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC)) != B_CLUSTEROK) ||
                    (tbp->b_bcount != tbp->b_bufsize) ||
                    (tbp->b_bcount != size) ||
                    (len == 1) ||
                    ((bp = trypbuf()) == NULL)) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        ++start_lbn;
                        --len;
                        continue;
                }

                /*
                 * We got a pbuf to make the cluster in, so initialize it.
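                 * The pbuf takes its identity (b_blkno, b_lblkno, b_offset,
                 * and write credential) from the first component buffer,
                 * and components are chained on b_cluster.cluster_head so
                 * that cluster_callback() can complete each one separately.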
                 */
                TAILQ_INIT(&bp->b_cluster.cluster_head);
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_npages = 0;
                if (tbp->b_wcred != NOCRED) {
                        bp->b_wcred = tbp->b_wcred;
                        crhold(bp->b_wcred);
                }

                bp->b_blkno = tbp->b_blkno;
                bp->b_lblkno = tbp->b_lblkno;
                bp->b_offset = tbp->b_offset;
                (vm_offset_t) bp->b_data |=
                    ((vm_offset_t) tbp->b_data) & PAGE_MASK;
                bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER |
                    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
                bp->b_iodone = cluster_callback;
                pbgetvp(vp, bp);

                /*
                 * From this location in the file, scan forward to see
                 * if there are buffers with adjacent data that need to
                 * be written as well.
                 */
                for (i = 0; i < len; ++i, ++start_lbn) {
                        if (i != 0) {   /* If not the first buffer */
                                s = splbio();
                                /*
                                 * If the adjacent data is not even in core
                                 * it can't need to be written.
                                 */
                                if ((tbp = gbincore(vp, start_lbn)) == NULL) {
                                        splx(s);
                                        break;
                                }

                                /*
                                 * If it IS in core, but has different
                                 * characteristics, don't cluster with it.
                                 */
                                if ((tbp->b_flags &
                                    (B_VMIO | B_CLUSTEROK | B_INVAL | B_BUSY |
                                     B_DELWRI | B_NEEDCOMMIT))
                                    != (B_DELWRI | B_CLUSTEROK |
                                     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT)))) {
                                        splx(s);
                                        break;
                                }

                                if (tbp->b_wcred != bp->b_wcred) {
                                        splx(s);
                                        break;
                                }

                                /*
                                 * Check that the combined cluster
                                 * would make sense with regard to pages
                                 * and would not be too large.
                                 */
                                if ((tbp->b_bcount != size) ||
                                    ((bp->b_blkno + (dbsize * i)) !=
                                        tbp->b_blkno) ||
                                    ((tbp->b_npages + bp->b_npages) >
                                        (vp->v_maxio / PAGE_SIZE))) {
                                        splx(s);
                                        break;
                                }
                                /*
                                 * Ok, it's passed all the tests,
                                 * so remove it from the free list
                                 * and mark it busy.  We will use it.
                                 */
                                bremfree(tbp);
                                tbp->b_flags |= B_BUSY;
                                tbp->b_flags &= ~B_DONE;
                                splx(s);
                        } /* end of code for non-first buffers only */
                        /* check for latent dependencies to be handled */
                        if ((LIST_FIRST(&tbp->b_dep)) != NULL &&
                            bioops.io_start)
                                (*bioops.io_start)(tbp);
                        /*
                         * If the IO is via the VM then we do some
                         * special VM hackery.
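                         * Each component page is busied and counted as
                         * paging_in_progress, and each distinct page is
                         * added once to the pbuf's page array for the
                         * combined transfer.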
                         * (yuck)
                         */
                        if (tbp->b_flags & B_VMIO) {
                                vm_page_t m;

                                if (i != 0) {   /* if not first buffer */
                                        for (j = 0; j < tbp->b_npages; j += 1) {
                                                m = tbp->b_pages[j];
                                                if (m->flags & PG_BUSY)
                                                        goto finishcluster;
                                        }
                                }

                                for (j = 0; j < tbp->b_npages; j += 1) {
                                        m = tbp->b_pages[j];
                                        ++m->busy;
                                        ++m->object->paging_in_progress;
                                        if ((bp->b_npages == 0) ||
                                            (bp->b_pages[bp->b_npages - 1] != m)) {
                                                bp->b_pages[bp->b_npages] = m;
                                                bp->b_npages++;
                                        }
                                }
                        }
                        bp->b_bcount += size;
                        bp->b_bufsize += size;

                        --numdirtybuffers;
                        tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
                        tbp->b_flags |= B_ASYNC;
                        reassignbuf(tbp, tbp->b_vp);    /* put on clean list */
                        ++tbp->b_vp->v_numoutput;
                        TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                            tbp, b_cluster.cluster_entry);
                }
        finishcluster:
                pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                    (vm_page_t *) bp->b_pages, bp->b_npages);
                if (bp->b_bufsize > bp->b_kvasize)
                        panic("cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
                            bp->b_bufsize, bp->b_kvasize);
                bp->b_kvasize = bp->b_bufsize;
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bawrite(bp);

                len -= i;
        }
        return totalwritten;
}

#ifdef notyet_block_reallocation_enabled
/*
 * Collect together all the buffers in a cluster,
 * plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
        struct vnode *vp;
        struct buf *last_bp;
{
        struct cluster_save *buflist;
        daddr_t lbn;
        int i, len;

        len = vp->v_lastw - vp->v_cstart + 1;
        buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
            M_SEGMENT, M_WAITOK);
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
                (void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
                    &buflist->bs_children[i]);
        buflist->bs_children[i] = last_bp;
        buflist->bs_nchildren = i + 1;
        return (buflist);
}
#endif /* notyet_block_reallocation_enabled */