/*-
 * Copyright (c) 1993
 *      The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_cluster.c       8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.55 1998/02/06 12:13:30 eivind Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
#include <sys/kernel.h>
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

#ifdef notyet_block_reallocation_enabled
static struct cluster_save *
        cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp));
#endif
static struct buf *
        cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn,
            daddr_t blkno, long size, int run, struct buf *fbp));

extern vm_page_t bogus_page;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.
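 *
 * A filesystem's read path would typically call it in place of bread()
 * when clustering is enabled.  A hedged sketch of such a call (names
 * like ip->i_size, lbn, blksize, and seqcount are assumed caller
 * context, not taken from this file):
 *
 *      error = cluster_read(vp, ip->i_size, lbn, blksize, NOCRED,
 *          uio->uio_resid, seqcount, &bp);
 *
 * On success, *bpp holds the requested block; any additional blocks
 * read ahead are left in the buffer cache for later hits.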
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
        struct vnode *vp;
        u_quad_t filesize;
        daddr_t lblkno;
        long size;
        struct ucred *cred;
        long totread;
        int seqcount;
        struct buf **bpp;
{
        struct buf *bp, *rbp, *reqbp;
        daddr_t blkno, origblkno;
        int error, num_ra;
        int i;
        int maxra, racluster;
        long origtotread;

        error = 0;
        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;

        /*
         * Try to limit the amount of read-ahead by a few
         * ad-hoc parameters.  This needs work!!!
         */
        racluster = vp->v_maxio / size;
        maxra = 2 * racluster + (totread / size);
        if (maxra > MAXRA)
                maxra = MAXRA;
        if (maxra > nbuf / 8)
                maxra = nbuf / 8;

        /*
         * Get the requested block.
         */
        *bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
        origblkno = lblkno;
        origtotread = totread;

        /*
         * If it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                if (!seqcount) {
                        return 0;
                } else if ((bp->b_flags & B_RAM) == 0) {
                        return 0;
                } else {
                        int s;
                        struct buf *tbp;
                        bp->b_flags &= ~B_RAM;
                        /*
                         * We do the spl here so that there is no window
                         * between the incore and the b_usecount increment
                         * below.  We opt to keep the spl out of the loop
                         * for efficiency.
                         */
                        s = splbio();
                        for (i = 1; i < maxra; i++) {

                                if (!(tbp = incore(vp, lblkno + i))) {
                                        break;
                                }

                                /*
                                 * Set another read-ahead mark so we know
                                 * to check again.
                                 */
                                if (((i % racluster) == (racluster - 1)) ||
                                    (i == (maxra - 1)))
                                        tbp->b_flags |= B_RAM;

                                if ((tbp->b_usecount < 5) &&
                                    ((tbp->b_flags & B_BUSY) == 0) &&
                                    (tbp->b_qindex == QUEUE_LRU)) {
                                        TAILQ_REMOVE(&bufqueues[QUEUE_LRU], tbp, b_freelist);
                                        TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], tbp, b_freelist);
                                }
                        }
                        splx(s);
                        if (i >= maxra) {
                                return 0;
                        }
                        lblkno += i;
                }
                reqbp = bp = NULL;
        } else {
                u_quad_t firstread;
                firstread = (u_quad_t) lblkno * size;
                if (firstread + totread > filesize)
                        totread = filesize - firstread;
                if (totread > size) {
                        int nblks = 0;
                        int ncontigafter;
                        while (totread > 0) {
                                nblks++;
                                totread -= size;
                        }
                        if (nblks == 1)
                                goto single_block_read;
                        if (nblks > racluster)
                                nblks = racluster;

                        error = VOP_BMAP(vp, lblkno, NULL,
                            &blkno, &ncontigafter, NULL);
                        if (error)
                                goto single_block_read;
                        if (blkno == -1)
                                goto single_block_read;
                        if (ncontigafter == 0)
                                goto single_block_read;
                        if (ncontigafter + 1 < nblks)
                                nblks = ncontigafter + 1;

                        bp = cluster_rbuild(vp, filesize, lblkno,
                            blkno, size, nblks, bp);
                        lblkno += nblks;
                } else {
single_block_read:
                        /*
                         * If it isn't in the cache, then get a chunk from
                         * disk if sequential, otherwise just get the block.
                         */
                        bp->b_flags |= B_READ | B_RAM;
                        lblkno += 1;
                }
        }

        /*
         * If we have been doing sequential I/O, then do some read-ahead.
         */
        rbp = NULL;
        if (seqcount && (lblkno < (origblkno + seqcount))) {
                /*
                 * We now build the read-ahead buffer if it is desirable.
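                 * The read-ahead buffer is always issued asynchronously;
                 * only the originally requested buffer (reqbp), if any,
                 * is waited on at the bottom of this function.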
                 */
                if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
                    !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
                    blkno != -1) {
                        int nblksread;
                        int ntoread = num_ra + 1;
                        nblksread = (origtotread + size - 1) / size;
                        if (seqcount < nblksread)
                                seqcount = nblksread;
                        if (seqcount < ntoread)
                                ntoread = seqcount;
                        if (num_ra) {
                                rbp = cluster_rbuild(vp, filesize, lblkno,
                                    blkno, size, ntoread, NULL);
                        } else {
                                rbp = getblk(vp, lblkno, size, 0, 0);
                                rbp->b_flags |= B_READ | B_ASYNC | B_RAM;
                                rbp->b_blkno = blkno;
                        }
                }
        }

        /*
         * Handle the synchronous read.
         */
        if (bp) {
                if (bp->b_flags & (B_DONE | B_DELWRI)) {
                        panic("cluster_read: DONE bp");
                } else {
#if defined(CLUSTERDEBUG)
                        if (rcluster)
                                printf("S(%d,%d,%d) ",
                                    bp->b_lblkno, bp->b_bcount, seqcount);
#endif
                        if ((bp->b_flags & B_CLUSTER) == 0)
                                vfs_busy_pages(bp, 0);
                        error = VOP_STRATEGY(bp);
                        curproc->p_stats->p_ru.ru_inblock++;
                }
        }
        /*
         * And if we have read-aheads, do them too.
         */
        if (rbp) {
                if (error) {
                        rbp->b_flags &= ~(B_ASYNC | B_READ);
                        brelse(rbp);
                } else if (rbp->b_flags & B_CACHE) {
                        rbp->b_flags &= ~(B_ASYNC | B_READ);
                        bqrelse(rbp);
                } else {
#if defined(CLUSTERDEBUG)
                        if (rcluster) {
                                if (bp)
                                        printf("A+(%d,%d,%d,%d) ",
                                            rbp->b_lblkno, rbp->b_bcount,
                                            rbp->b_lblkno - origblkno,
                                            seqcount);
                                else
                                        printf("A(%d,%d,%d,%d) ",
                                            rbp->b_lblkno, rbp->b_bcount,
                                            rbp->b_lblkno - origblkno,
                                            seqcount);
                        }
#endif

                        if ((rbp->b_flags & B_CLUSTER) == 0)
                                vfs_busy_pages(rbp, 0);
                        (void) VOP_STRATEGY(rbp);
                        curproc->p_stats->p_ru.ru_inblock++;
                }
        }
        if (reqbp)
                return (biowait(reqbp));
        else
                return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
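 *
 * The cluster is represented by a single pageable buffer (from
 * trypbuf()) whose pages alias the pages of the component buffers;
 * cluster_callback() later tears the cluster apart and completes the
 * component buffers individually.  If no pbuf is available, or the
 * first buffer is not eligible (B_MALLOC, or not B_VMIO), we quietly
 * fall back to reading just the single block.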
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
        struct vnode *vp;
        u_quad_t filesize;
        daddr_t lbn;
        daddr_t blkno;
        long size;
        int run;
        struct buf *fbp;
{
        struct buf *bp, *tbp;
        daddr_t bn;
        int i, inc, j;

#ifdef DIAGNOSTIC
        if (size != vp->v_mount->mnt_stat.f_iosize)
                panic("cluster_rbuild: size %d != filesize %d\n",
                    size, vp->v_mount->mnt_stat.f_iosize);
#endif
        /*
         * avoid a division
         */
        while ((u_quad_t) size * (lbn + run) > filesize) {
                --run;
        }

        if (fbp) {
                tbp = fbp;
                tbp->b_flags |= B_READ;
        } else {
                tbp = getblk(vp, lbn, size, 0, 0);
                if (tbp->b_flags & B_CACHE)
                        return tbp;
                tbp->b_flags |= B_ASYNC | B_READ | B_RAM;
        }

        tbp->b_blkno = blkno;
        if ((tbp->b_flags & B_MALLOC) ||
            ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
                return tbp;

        bp = trypbuf();
        if (bp == NULL)
                return tbp;

        (vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
        bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
        bp->b_iodone = cluster_callback;
        bp->b_blkno = blkno;
        bp->b_lblkno = lbn;
        pbgetvp(vp, bp);

        TAILQ_INIT(&bp->b_cluster.cluster_head);

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_npages = 0;

        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;
        inc = btodb(size);
        for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
                if (i != 0) {
                        if ((bp->b_npages * PAGE_SIZE) +
                            round_page(size) > vp->v_maxio)
                                break;

                        if (incore(vp, lbn + i))
                                break;

                        tbp = getblk(vp, lbn + i, size, 0, 0);

                        if ((tbp->b_flags & B_CACHE) ||
                            (tbp->b_flags & B_VMIO) == 0) {
                                bqrelse(tbp);
                                break;
                        }

                        for (j = 0; j < tbp->b_npages; j++) {
                                if (tbp->b_pages[j]->valid) {
                                        break;
                                }
                        }

                        if (j != tbp->b_npages) {
                                /*
                                 * Force buffer to be re-constituted later.
                                 */
                                tbp->b_flags |= B_RELBUF;
                                brelse(tbp);
                                break;
                        }

                        if ((fbp && (i == 1)) || (i == (run - 1)))
                                tbp->b_flags |= B_RAM;
                        tbp->b_flags |= B_READ | B_ASYNC;
                        if (tbp->b_blkno == tbp->b_lblkno) {
                                tbp->b_blkno = bn;
                        } else if (tbp->b_blkno != bn) {
                                brelse(tbp);
                                break;
                        }
                }
                TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                    tbp, b_cluster.cluster_entry);
                for (j = 0; j < tbp->b_npages; j += 1) {
                        vm_page_t m;
                        m = tbp->b_pages[j];
                        ++m->busy;
                        ++m->object->paging_in_progress;
                        if ((bp->b_npages == 0) ||
                            (bp->b_pages[bp->b_npages - 1] != m)) {
                                bp->b_pages[bp->b_npages] = m;
                                bp->b_npages++;
                        }
                        if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
                                tbp->b_pages[j] = bogus_page;
                }
                bp->b_bcount += tbp->b_bcount;
                bp->b_bufsize += tbp->b_bufsize;
        }

        for (j = 0; j < bp->b_npages; j++) {
                if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
                    VM_PAGE_BITS_ALL)
                        bp->b_pages[j] = bogus_page;
        }
        if (bp->b_bufsize > bp->b_kvasize)
                panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)\n",
                    bp->b_bufsize, bp->b_kvasize);
        bp->b_kvasize = bp->b_bufsize;

        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
            (vm_page_t *)bp->b_pages, bp->b_npages);
        return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
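 *
 * The walk over b_cluster.cluster_head below completes each component
 * buffer with biodone(), propagating any error from the cluster buffer,
 * and finally releases the cluster's pbuf with relpbuf().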
 */
void
cluster_callback(bp)
        struct buf *bp;
{
        struct buf *nbp, *tbp;
        int error = 0;

        /*
         * Must propagate errors to all the components.
         */
        if (bp->b_flags & B_ERROR)
                error = bp->b_error;

        pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.
         */
        for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
            tbp; tbp = nbp) {
                nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
                if (error) {
                        tbp->b_flags |= B_ERROR;
                        tbp->b_error = error;
                } else
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                biodone(tbp);
        }
        relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *      1.      Write is not sequential (write asynchronously)
 *      Write is sequential:
 *      2.      beginning of cluster - begin cluster
 *      3.      middle of a cluster - add to cluster
 *      4.      end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
        struct buf *bp;
        u_quad_t filesize;
{
        struct vnode *vp;
        daddr_t lbn;
        int maxclen, cursize;
        int lblocksize;
        int async;

        vp = bp->b_vp;
        if (vp->v_maxio == 0)
                vp->v_maxio = DFLTPHYS;
        if (vp->v_type == VREG) {
                async = vp->v_mount->mnt_flag & MNT_ASYNC;
                lblocksize = vp->v_mount->mnt_stat.f_iosize;
        } else {
                async = 0;
                lblocksize = bp->b_bufsize;
        }
        lbn = bp->b_lblkno;

        /* Initialize vnode to beginning of file. */
        if (lbn == 0)
                vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

        if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
            (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
                maxclen = vp->v_maxio / lblocksize - 1;
                if (vp->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at end of file, the process
                         * seeked to another point in the file since its last
                         * write, or we have reached our maximum cluster size,
                         * then push the previous cluster.  Otherwise try
                         * reallocating to make it sequential.
                         */
                        cursize = vp->v_lastw - vp->v_cstart + 1;
#ifndef notyet_block_reallocation_enabled
                        if (((u_quad_t)(lbn + 1) * lblocksize) != filesize ||
                            lbn != vp->v_lastw + 1 ||
                            vp->v_clen <= cursize) {
                                if (!async)
                                        cluster_wbuild(vp, lblocksize,
                                            vp->v_cstart, cursize);
                        }
#else
                        if ((lbn + 1) * lblocksize != filesize ||
                            lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
                                if (!async)
                                        cluster_wbuild(vp, lblocksize,
                                            vp->v_cstart, cursize);
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(vp, bp);
                                endbp = &buflist->bs_children
                                    [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                            bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        free(buflist, M_SEGMENT);
                                        cluster_wbuild(vp, lblocksize,
                                            vp->v_cstart, cursize);
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                            bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        free(buflist, M_SEGMENT);
                                        vp->v_lastw = lbn;
                                        vp->v_lasta = bp->b_blkno;
                                        return;
                                }
                        }
#endif /* notyet_block_reallocation_enabled */
                }
                /*
                 * Consider beginning a cluster.
                 * If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                    ((u_quad_t) (lbn + 1) * lblocksize) != filesize &&
                    (bp->b_blkno == bp->b_lblkno) &&
                    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
                     bp->b_blkno == -1)) {
                        bawrite(bp);
                        vp->v_clen = 0;
                        vp->v_lasta = bp->b_blkno;
                        vp->v_cstart = lbn + 1;
                        vp->v_lastw = lbn;
                        return;
                }
                vp->v_clen = maxclen;
                if (!async && maxclen == 0) {   /* I/O not contiguous */
                        vp->v_cstart = lbn + 1;
                        bawrite(bp);
                } else {        /* Wait for rest of cluster */
                        vp->v_cstart = lbn;
                        bdwrite(bp);
                }
        } else if (lbn == vp->v_cstart + vp->v_clen) {
                /*
                 * At end of cluster, write it out.
                 */
                bdwrite(bp);
                cluster_wbuild(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
                vp->v_clen = 0;
                vp->v_cstart = lbn + 1;
        } else
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        vp->v_lastw = lbn;
        vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Write out up to len delayed-write blocks starting at start_lbn,
 * gathering runs of contiguous dirty buffers into single larger writes
 * where possible.  Returns the number of bytes for which writes were
 * started.
 */
int
cluster_wbuild(vp, size, start_lbn, len)
        struct vnode *vp;
        long size;
        daddr_t start_lbn;
        int len;
{
        struct buf *bp, *tbp;
        int i, j, s;
        int totalwritten = 0;
        int dbsize = btodb(size);

        while (len > 0) {
                s = splbio();
                if (((tbp = gbincore(vp, start_lbn)) == NULL) ||
                    ((tbp->b_flags & (B_INVAL | B_BUSY | B_DELWRI)) != B_DELWRI)) {
                        ++start_lbn;
                        --len;
                        splx(s);
                        continue;
                }
                bremfree(tbp);
                tbp->b_flags |= B_BUSY;
                tbp->b_flags &= ~B_DONE;
                splx(s);

                /*
                 * Extra memory in the buffer, punt on this buffer.  XXX we
                 * could handle this in most cases, but we would have to push
                 * the extra memory down to after our max possible cluster
                 * size and then potentially pull it back up if the cluster
                 * was terminated prematurely--too much hassle.
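                 *
                 * (B_MALLOC buffers, and buffers whose b_bcount differs
                 * from b_bufsize or from the cluster block size, are thus
                 * written out individually by the bawrite() just below.)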
                 */
                if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC)) != B_CLUSTEROK) ||
                    (tbp->b_bcount != tbp->b_bufsize) ||
                    (tbp->b_bcount != size) ||
                    len == 1) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        ++start_lbn;
                        --len;
                        continue;
                }

                bp = trypbuf();
                if (bp == NULL) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        ++start_lbn;
                        --len;
                        continue;
                }

                TAILQ_INIT(&bp->b_cluster.cluster_head);
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_npages = 0;
                if (tbp->b_wcred != NOCRED) {
                        bp->b_wcred = tbp->b_wcred;
                        crhold(bp->b_wcred);
                }

                bp->b_blkno = tbp->b_blkno;
                bp->b_lblkno = tbp->b_lblkno;
                (vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
                bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER |
                    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
                bp->b_iodone = cluster_callback;
                pbgetvp(vp, bp);

                for (i = 0; i < len; ++i, ++start_lbn) {
                        if (i != 0) {
                                s = splbio();
                                if ((tbp = gbincore(vp, start_lbn)) == NULL) {
                                        splx(s);
                                        break;
                                }

                                if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                                    B_INVAL | B_BUSY | B_DELWRI |
                                    B_NEEDCOMMIT)) != (B_DELWRI | B_CLUSTEROK |
                                    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT)))) {
                                        splx(s);
                                        break;
                                }

                                if (tbp->b_wcred != bp->b_wcred) {
                                        splx(s);
                                        break;
                                }

                                if ((tbp->b_bcount != size) ||
                                    ((bp->b_blkno + dbsize * i) != tbp->b_blkno) ||
                                    ((tbp->b_npages + bp->b_npages) >
                                     (vp->v_maxio / PAGE_SIZE))) {
                                        splx(s);
                                        break;
                                }
                                bremfree(tbp);
                                tbp->b_flags |= B_BUSY;
                                tbp->b_flags &= ~B_DONE;
                                splx(s);
                        }

                        if (tbp->b_flags & B_VMIO) {
                                vm_page_t m;

                                if (i != 0) {
                                        for (j = 0; j < tbp->b_npages; j += 1) {
                                                m = tbp->b_pages[j];
                                                if (m->flags & PG_BUSY)
                                                        goto finishcluster;
                                        }
                                }

                                for (j = 0; j < tbp->b_npages; j += 1) {
                                        m = tbp->b_pages[j];
                                        ++m->busy;
                                        ++m->object->paging_in_progress;
                                        if ((bp->b_npages == 0) ||
                                            (bp->b_pages[bp->b_npages - 1] != m)) {
                                                bp->b_pages[bp->b_npages] = m;
                                                bp->b_npages++;
                                        }
                                }
                        }
                        bp->b_bcount += size;
                        bp->b_bufsize += size;

                        --numdirtybuffers;
                        tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
                        tbp->b_flags |= B_ASYNC;
                        s = splbio();
                        reassignbuf(tbp, tbp->b_vp);    /* put on clean list */
                        ++tbp->b_vp->v_numoutput;
                        splx(s);
                        TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                            tbp, b_cluster.cluster_entry);
                }
        finishcluster:
                pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                    (vm_page_t *) bp->b_pages, bp->b_npages);
                if (bp->b_bufsize > bp->b_kvasize)
                        panic("cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
                            bp->b_bufsize, bp->b_kvasize);
                bp->b_kvasize = bp->b_bufsize;
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bawrite(bp);

                len -= i;
        }
        return totalwritten;
}

#ifdef notyet_block_reallocation_enabled
/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
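 *
 * This is only compiled when notyet_block_reallocation_enabled is
 * defined; cluster_write() passes the resulting cluster_save list to
 * VOP_REALLOCBLKS() when trying to make a cluster's blocks physically
 * sequential.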
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
        struct vnode *vp;
        struct buf *last_bp;
{
        struct cluster_save *buflist;
        daddr_t lbn;
        int i, len;

        len = vp->v_lastw - vp->v_cstart + 1;
        buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
            M_SEGMENT, M_WAITOK);
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
                (void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
                    &buflist->bs_children[i]);
        buflist->bs_children[i] = last_bp;
        buflist->bs_nchildren = i + 1;
        return (buflist);
}
#endif /* notyet_block_reallocation_enabled */
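
/*
 * Usage sketch (an assumption about typical callers, not code from this
 * file): a filesystem write path such as ffs_write() commonly finishes
 * a full-block write with something like
 *
 *      if (doclusterwrite)
 *              cluster_write(bp, ip->i_size);
 *      else
 *              bawrite(bp);
 *
 * so that sequential writes are gathered into clusters before being
 * pushed to disk.
 */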