/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD$
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/stdint.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

/* Page expended to mark partially backed buffers */
extern vm_page_t bogus_page;

/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
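 *
 * For illustration: with a typical 8 KB filesystem block size this caps a
 * single read-ahead pass at 32 blocks (256 KB); cluster_read() below
 * additionally limits the pass to nbuf/8 buffers.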
 */
#define MAXRA 32

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int error, num_ra;
	int i;
	int maxra, racluster;
	long origtotread;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = 2 * racluster + (totread / size);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf/8)
		maxra = nbuf/8;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
	origblkno = lblkno;
	origtotread = totread;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the incore and the b_usecount increment
			 * below.  We opt to keep the spl out of the loop
			 * for efficiency.
			 */
			s = splbio();
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				tbp = gbincore(vp, lblkno+i);
				if (tbp == NULL || (tbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					tbp->b_flags |= B_RAM;
			}
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	} else {
		off_t firstread = bp->b_offset;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		if (totread > size) {
			int nblks = 0;
			int ncontigafter;
			while (totread > 0) {
				nblks++;
				totread -= size;
			}
			if (nblks == 1)
				goto single_block_read;
			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontigafter, NULL);
			if (error)
				goto single_block_read;
			if (blkno == -1)
				goto single_block_read;
			if (ncontigafter == 0)
				goto single_block_read;
			if (ncontigafter + 1 < nblks)
				nblks = ncontigafter + 1;

			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
single_block_read:
			/*
			 * if it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	rbp = NULL;
	if (seqcount && (lblkno < (origblkno + seqcount))) {
		/*
		 * we now build the read-ahead buffer if it is desirable.
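		 *
		 * For illustration: "desirable" here means the next logical
		 * block still lies within the file, VOP_BMAP() can translate
		 * it, and the resulting blkno is valid; ntoread is then
		 * clamped by seqcount so the read-ahead never outruns what
		 * the observed access pattern justifies.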
		 */
		if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
		    !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
		    blkno != -1) {
			int nblksread;
			int ntoread = num_ra + 1;
			nblksread = (origtotread + size - 1) / size;
			if (seqcount < nblksread)
				seqcount = nblksread;
			if (seqcount < ntoread)
				ntoread = seqcount;
			if (num_ra) {
				rbp = cluster_rbuild(vp, filesize, lblkno,
				    blkno, size, ntoread, NULL);
			} else {
				rbp = getblk(vp, lblkno, size, 0, 0);
				rbp->b_flags |= B_ASYNC | B_RAM;
				rbp->b_iocmd = BIO_READ;
				rbp->b_blkno = blkno;
			}
		}
	}

	/*
	 * handle the synchronous read
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			printf("S(%ld,%ld,%d) ",
			    (long)bp->b_lblkno, bp->b_bcount, seqcount);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		if (error) {
			rbp->b_flags &= ~B_ASYNC;
			brelse(rbp);
		} else if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
		} else {
#if defined(CLUSTERDEBUG)
			if (rcluster) {
				if (bp)
					printf("A+");
				else
					printf("A");
				printf("(%lld,%ld,%lld,%d) ",
				    (intmax_t)rbp->b_lblkno, rbp->b_bcount,
				    (intmax_t)(rbp->b_lblkno - origblkno),
				    seqcount);
			}
#endif

			if ((rbp->b_flags & B_CLUSTER) == 0) {
				vfs_busy_pages(rbp, 0);
			}
			rbp->b_flags &= ~B_INVAL;
			rbp->b_ioflags &= ~BIO_ERROR;
			if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
				BUF_KERNPROC(rbp);
			(void) VOP_STRATEGY(vp, rbp);
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	GIANT_REQUIRED;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == 0)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
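	 *
	 * For illustration: with, say, a 2 KB block size on a 4 KB page,
	 * tbp->b_data may begin part-way into its first page; reusing that
	 * in-page offset keeps byte 0 of the cluster mapping lined up with
	 * byte 0 of the original buffer.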
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			if ((tbp = incore(vp, lbn + i)) != NULL &&
			    (tbp->b_flags & B_INVAL) == 0) {
				if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT))
					break;
				BUF_UNLOCK(tbp);

				for (j = 0; j < tbp->b_npages; j++) {
					if (tbp->b_pages[j]->valid)
						break;
				}

				if (j != tbp->b_npages)
					break;

				if (tbp->b_bcount != size)
					break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0);

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid)
					break;
			}
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
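		 *
		 * Note that both counters below advance by 'size' for every
		 * component buffer, so the warnings only fire when a
		 * component's own accounting disagrees with the cluster
		 * block size.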
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	GIANT_REQUIRED;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* fall through */
	default:
		/* fall through */
		break;
	}
	return(r);
}

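/*
 * For illustration of the backed-off mode above: if cluster_wbuild_wb() is
 * asked to push blocks [start_lbn, start_lbn + len) while write_behind is 2,
 * it instead pushes the previous window [start_lbn - len, start_lbn),
 * leaving the most recently dirtied blocks in the cache a little longer.
 */
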
/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
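		 *
		 * For reference: v_cstart is the first logical block of the
		 * cluster being accumulated, v_lastw the last block written,
		 * v_lasta the disk address of that last write, and v_clen
		 * the maximum length the cluster may reach before it is
		 * pushed.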
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	GIANT_REQUIRED;

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		if (((tbp = gbincore(vp, start_lbn)) == NULL) ||
		    ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI) ||
		    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in,
		 * so initialise it.
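		 *
		 * The pbuf starts out as an empty shell: it borrows its
		 * identity (block number, file offset, buf_ops and write
		 * credential) from the first delayed-write buffer and then
		 * accumulates the pages of the neighbours found below.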
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				if ((tbp = gbincore(vp, start_lbn)) == NULL) {
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    tbp->b_wcred != bp->b_wcred ||
				    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL)
				buf_start(tbp);
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
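			 *
			 * For illustration: with 1 KB blocks on 4 KB pages, up
			 * to four consecutive buffers can reference the same
			 * vm_page_t; the bp->b_pages[bp->b_npages - 1] != m
			 * test below is what skips the duplicates.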
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			++tbp->b_vp->v_numoutput;
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}