/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp);
static void cluster_callback(struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 8;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

/* Page expended to mark partially backed buffers */
extern vm_page_t bogus_page;

/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;
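
	/*
	 * Editorial worked example (numbers assumed, not taken from the
	 * code above): with mnt_iosize_max = 64KB and an 8KB block size,
	 * racluster = 64KB / 8KB = 8 blocks per cluster I/O.  A reader
	 * with seqcount = 16 and the default vfs.read_max = 8 gets
	 * maxra = min(16, 8, nbuf/8) = 8 blocks of read-ahead, further
	 * clipped so that (lblkno + maxra + 1) * size stays inside the
	 * file.
	 */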

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return (0);
		} else if ((bp->b_flags & B_RAM) == 0) {
			return (0);
		} else {
			int s;

			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the gbincore() lookup and the flag
			 * manipulation below.  We opt to keep the spl out
			 * of the loop for efficiency.
			 */
			s = splbio();
			VI_LOCK(vp);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(&vp->v_bufobj, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					rbp->b_flags |= B_RAM;
			}
			VI_UNLOCK(vp);
			splx(s);
			if (i >= maxra) {
				return (0);
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, 0);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		bstrategy(rbp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}
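
/*
 * Editorial sketch (not part of this file): how a filesystem read path
 * typically decides between cluster_read() and plain bread().  The
 * "example_fs_read" name and its locals are hypothetical; the pattern
 * follows the way ffs_read()-style code drives this interface.
 */
#if 0
static int
example_fs_read(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    long bsize, struct ucred *cred, long resid, int seqcount,
    struct buf **bpp)
{

	if (seqcount > 1)
		/* Sequential access: read the block plus read-ahead. */
		return (cluster_read(vp, filesize, lbn, bsize, cred,
		    resid, seqcount, bpp));
	/* Random access: just fetch the one block. */
	return (bread(vp, lbn, bsize, cred, bpp));
}
#endif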

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, struct buf *fbp)
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return (tbp);
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return (tbp);

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return (tbp);

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT);

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			VI_LOCK(vp);
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				VI_UNLOCK(vp);
				bqrelse(tbp);
				break;
			}
			VI_UNLOCK(vp);

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
			for (j = 0; j < tbp->b_npages; j++) {
				VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object,
				    MA_OWNED);
				if (tbp->b_pages[j]->valid)
					break;
			}
			VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
		for (j = 0; j < tbp->b_npages; j++) {
			vm_page_t m;

			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	for (j = 0; j < bp->b_npages; j++) {
		VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED);
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}
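
/*
 * Editorial worked example (numbers assumed, not from the source): on a
 * filesystem with 8KB blocks, 4KB pages and mnt_iosize_max = 64KB, a
 * cluster_rbuild() call with run = 8 synthesizes one pbuf covering 8
 * component buffers and at most 16 pages; any component page that is
 * already fully valid is swapped for bogus_page so the disk transfer
 * does not overwrite it.
 */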

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
static void
cluster_callback(struct buf *bp)
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	pbrelvp(bp);
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 * cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */
static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		break;
	}
	return (r);
}
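
/*
 * Editorial worked example (numbers assumed, not from the source): with
 * vfs.write_behind=2 and a cluster of len = 4 starting at start_lbn = 16,
 * cluster_wbuild_wb() backs off one full cluster and issues the write for
 * lbns 12-15 instead, leaving the most recent cluster dirty; when
 * start_lbn < len there is no earlier cluster to flush, so nothing is
 * written.
 */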

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize,
    int seqcount)
{
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					    bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		    bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
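
/*
 * Editorial sketch (not part of this file): how a filesystem write path
 * typically hands a dirty buffer to the clustering code.  The
 * "example_fs_write_block" name is hypothetical; the B_CLUSTEROK test
 * mirrors the way ffs_write()-style code drives this interface.
 */
#if 0
static void
example_fs_write_block(struct vnode *vp, struct buf *bp, u_quad_t filesize,
    int seqcount)
{

	if (bp->b_flags & B_CLUSTEROK)
		/* Let the cluster machinery decide when to issue the I/O. */
		cluster_write(vp, bp, filesize, seqcount);
	else
		/* Not clusterable: start an asynchronous write now. */
		bawrite(bp);
}
#endif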

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan the range [start_lbn, start_lbn + len) and gather the dirty,
 * clusterable buffers found there into clustered writes.
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		VI_LOCK(vp);
		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			VI_UNLOCK(vp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}
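
		/*
		 * Editorial note (not from the source): a buffer marked
		 * B_DELWRI | B_CLUSTEROK | B_VMIO whose b_bcount equals
		 * both b_bufsize and size passes the tests above and is
		 * gathered into the pbuf below; a malloc-backed buffer
		 * (B_MALLOC), a mismatched b_bcount, a run of length 1,
		 * or pbuf exhaustion causes the buffer to be pushed out
		 * on its own via bawrite() instead.
		 */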

		/*
		 * We got a pbuf to make the cluster in, so initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core
				 * it can't need to be written.
				 */
				VI_LOCK(vp);
				if ((tbp = gbincore(&vp->v_bufobj,
				    start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					VI_UNLOCK(vp);
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    VI_MTX(vp))) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
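
			/*
			 * Editorial worked example (numbers assumed, not
			 * from the source): with 4KB pages and
			 * mnt_iosize_max = 64KB, the page check above caps
			 * a combined cluster at 16 pages; likewise the
			 * bp->b_blkno + dbsize * i test insists that the
			 * i-th buffer sits exactly dbsize (= btodb(size))
			 * device blocks past its predecessor, i.e. that
			 * the run is physically contiguous on disk.
			 */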

			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL) {
				tbp->b_iocmd = BIO_WRITE;
				buf_start(tbp);
			}
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				VM_OBJECT_LOCK(tbp->b_bufobj->bo_object);
				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j++) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							VM_OBJECT_UNLOCK(
							    tbp->b_bufobj->bo_object);
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				for (j = 0; j < tbp->b_npages; j++) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				VM_OBJECT_UNLOCK(tbp->b_bufobj->bo_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp);	/* put on clean list */
			bufobj_wref(tbp->b_bufobj);
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return (totalwritten);
}
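
/*
 * Editorial sketch (not part of this file): cluster_wbuild() can be
 * driven directly to flush a known-dirty logical range.  The
 * "example_flush_range" name is hypothetical; the return value is the
 * number of bytes of buffer space for which writes were started.
 */
#if 0
static void
example_flush_range(struct vnode *vp, long bsize, daddr_t first, int nblks)
{
	int written;

	/*
	 * Gather as many of the nblks dirty buffers as possible into
	 * clustered writes; ineligible buffers go out individually.
	 */
	written = cluster_wbuild(vp, bsize, first, nblks);
	printf("started writes for %d bytes\n", written);
}
#endif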

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp)
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
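
/*
 * Editorial note (not from the source): cluster_collectbufs() makes a
 * single allocation holding both the cluster_save header and its child
 * array, laid out as:
 *
 *	+---------------------+-----------------------------------+
 *	| struct cluster_save | struct buf *bs_children[len + 1]  |
 *	+---------------------+-----------------------------------+
 *
 * bs_children points just past the header (buflist + 1), and the extra
 * slot holds last_bp, so one free(buflist, M_SEGMENT) releases it all.
 */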