/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 8;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

/* Page expended to mark partially backed buffers */
extern vm_page_t	bogus_page;
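
/*
 * The vfs.write_behind and vfs.read_max knobs above are run-time
 * tunable.  For example (illustrative only; good values are
 * workload-dependent):
 *
 *	sysctl vfs.read_max=16		# allow deeper read-ahead
 *	sysctl vfs.write_behind=0	# disable write-behind clustering
 */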

/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;

			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the gbincore() lookup and the b_flags
			 * update below.  We opt to keep the spl out of
			 * the loop for efficiency.
			 */
			s = splbio();
			VI_LOCK(vp);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(vp, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					rbp->b_flags |= B_RAM;
			}
			VI_UNLOCK(vp);
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
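			/*
			 * Note: the run count returned by VOP_BMAP counts
			 * the contiguous blocks *after* lblkno, so one more
			 * is added below for lblkno itself.
			 */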
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
		if (error)
			return (error);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, 0);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		(void) VOP_STRATEGY(vp, rbp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
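 *
 * (Parameter sketch: 'lbn'/'blkno' are the starting logical and on-disk
 * block numbers, 'size' is the filesystem block size, 'run' is the
 * maximum number of blocks to chain together, and 'fbp', when non-NULL,
 * is the caller's already-allocated first buffer.)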
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	GIANT_REQUIRED;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT);

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			VI_LOCK(bp->b_vp);
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				VI_UNLOCK(bp->b_vp);
				bqrelse(tbp);
				break;
			}
			VI_UNLOCK(bp->b_vp);

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid)
					break;
			}
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
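			 * (b_blkno == b_lblkno is the convention for a
			 * buffer whose disk address has not been resolved
			 * yet; a mismatch means the mapping changed under
			 * us, so the buffer is tossed and the scan stops.)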
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		if (tbp->b_object != NULL)
			VM_OBJECT_LOCK(tbp->b_object);
		vm_page_lock_queues();
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;

			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		vm_page_unlock_queues();
		if (tbp->b_object != NULL)
			VM_OBJECT_UNLOCK(tbp->b_object);
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	GIANT_REQUIRED;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
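			 * (B_RELBUF asks that the buffer's backing store
			 * be released when it is brelse()'d after the I/O
			 * completes.)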
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * has seeked to another point in the file since its
			 * last write, or we have reached our maximum cluster
			 * size, then push the previous cluster.  Otherwise
			 * try reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
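					 *
					 * (The last bs_children entry is the
					 * caller's own bp; it is not released
					 * here and instead falls through to
					 * the cluster-start logic below.)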
					 */
					for (bpp = buflist->bs_children;
					    bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		    bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Write out 'len' delayed-write blocks of 'size' bytes each, starting at
 * logical block 'start_lbn', clustering adjacent dirty buffers together
 * where possible.
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	GIANT_REQUIRED;

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		VI_LOCK(vp);
		if ((tbp = gbincore(vp, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			VI_UNLOCK(vp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
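		 * (b_bcount != b_bufsize below is the telltale for such
		 * extra memory; we also punt when clustering is not
		 * allowed on the buffer, only one block remains, or no
		 * pbuf is available, and simply bawrite() the block
		 * alone.)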
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in, so initialise it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core
				 * it can't need to be written.
				 */
				VI_LOCK(vp);
				if ((tbp = gbincore(vp, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					VI_UNLOCK(vp);
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    VI_MTX(vp))) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL) {
				tbp->b_iocmd = BIO_WRITE;
				buf_start(tbp);
			}
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
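			 * (Because pages arrive in ascending file order,
			 * comparing each page against the last one already
			 * in the cluster is enough to catch a duplicate.)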
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				if (tbp->b_object != NULL)
					VM_OBJECT_LOCK(tbp->b_object);
				vm_page_lock_queues();
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				vm_page_unlock_queues();
				if (tbp->b_object != NULL)
					VM_OBJECT_UNLOCK(tbp->b_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			VI_LOCK(tbp->b_vp);
			++tbp->b_vp->v_numoutput;
			VI_UNLOCK(tbp->b_vp);
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
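
/*
 * Usage sketch (illustrative only, not part of this file's interfaces):
 * a filesystem read path that wants clustering calls cluster_read() in
 * place of bread().  Assuming an FFS-style caller with inode 'ip',
 * logical block 'lbn', block size 'size', and a sequential-access hint
 * 'seqcount', the call might look like:
 *
 *	error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *	    uio->uio_resid, seqcount, &bp);
 *
 * The matching write-side hook is cluster_write(bp, filesize, seqcount),
 * invoked where a non-clustering filesystem would bdwrite() or
 * bawrite() the block.
 */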