/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");
static uma_zone_t cluster_pbuf_zone;

static void cluster_init(void *);
static struct cluster_save *cluster_collectbufs(struct vnode *vp,
	    struct vn_clusterw *vnc, struct buf *last_bp, int gbflags);
static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
	    daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
	    struct buf *fbp);
static void cluster_callback(struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 64;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

static int read_min = 1;
SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
    "Cluster read min block count");
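
/*
 * Example: the knobs above are ordinary sysctl(9) integers, so read-ahead
 * and write-behind behaviour can be inspected and tuned from userland,
 * e.g. (hypothetical values):
 *
 *	# sysctl vfs.read_max=128	(allow deeper read-ahead)
 *	# sysctl vfs.read_min=4		(read at least 4 blocks synchronously)
 *	# sysctl vfs.write_behind=2	(use the backed-off write-behind mode)
 */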

SYSINIT(cluster, SI_SUB_CPU, SI_ORDER_ANY, cluster_init, NULL);

static void
cluster_init(void *dummy)
{

	cluster_pbuf_zone = pbuf_zsecond_create("clpbuf", nswbuf / 2);
}

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, int gbflags,
    struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	struct bufobj *bo;
	struct thread *td;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;
	td = curthread;
	bo = &vp->v_bufobj;
	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;
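
	/*
	 * Worked example of the clamping above (hypothetical numbers): with
	 * 16 KB blocks, a seqcount of 127 and the default read_max of 64,
	 * maxra becomes 64; if only 10 blocks remain between lblkno and the
	 * end of the file, the final test shrinks maxra to 10 so read-ahead
	 * never runs past end of file.
	 */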

	/*
	 * get the requested block
	 */
	error = getblkx(vp, lblkno, lblkno, size, 0, 0, gbflags, &bp);
	if (error != 0) {
		*bpp = NULL;
		return (error);
	}
	gbflags &= ~GB_NOSPARSE;
	origblkno = lblkno;
	*bpp = reqbp = bp;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			bp->b_flags &= ~B_RAM;
			BO_RLOCK(bo);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(&vp->v_bufobj, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again. (If we can lock the
				 * buffer without waiting)
				 */
				if ((((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
				    && (0 == BUF_LOCK(rbp,
				    LK_EXCLUSIVE | LK_NOWAIT, NULL))) {
					rbp->b_flags |= B_RAM;
					BUF_UNLOCK(rbp);
				}
			}
			BO_RUNLOCK(bo);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;
		long minread;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Adjust totread if needed
		 */
		minread = read_min * size;
		if (minread > totread)
			totread = minread;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, gbflags, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			racct_add_buf(td->td_proc, bp, 0);
			PROC_UNLOCK(td->td_proc);
		}
#endif /* RACCT */
		td->td_ru.ru_inblock++;
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, gbflags, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		bstrategy(rbp);
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(td->td_proc);
			racct_add_buf(td->td_proc, rbp, 0);
			PROC_UNLOCK(td->td_proc);
		}
#endif /* RACCT */
		td->td_ru.ru_inblock++;
	}

	if (reqbp) {
		/*
		 * Like bread, always brelse() the buffer when
		 * returning an error.
		 */
		error = bufwait(reqbp);
		if (error != 0) {
			brelse(reqbp);
			*bpp = NULL;
		}
	}
	return (error);
}
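
/*
 * Illustrative caller sketch (hypothetical locals ip, lbn, bsize and
 * blkoffset): a filesystem's VOP_READ path typically falls back to bread()
 * for random access and only asks for clustering when the access pattern
 * looks sequential, roughly:
 *
 *	if (seqcount > 1)
 *		error = cluster_read(vp, ip->i_size, lbn, bsize, NOCRED,
 *		    blkoffset + uio->uio_resid, seqcount, 0, &bp);
 *	else
 *		error = bread(vp, lbn, bsize, NOCRED, &bp);
 *
 * See ffs_read() in ufs/ffs/ffs_vnops.c for one real caller.
 */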

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
	struct buf *bp, *tbp;
	daddr_t bn;
	off_t off;
	long tinc, tsize;
	int i, inc, j, k, toff;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, gbflags);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}
	tbp->b_blkno = blkno;
	if ( (tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) )
		return tbp;

	bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT);
	if (bp == NULL)
		return tbp;
	MPASS((bp->b_flags & B_MAXPHYS) != 0);

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	if ((gbflags & GB_UNMAPPED) != 0) {
		bp->b_data = unmapped_buf;
	} else {
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	}
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i == 0) {
			vm_object_pip_add(tbp->b_bufobj->bo_object,
			    tbp->b_npages);
			vfs_busy_pages_acquire(tbp);
		} else {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
			    (gbflags & GB_UNMAPPED));

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.  The bo lock is not
			 * required for the BKGRDINPROG check since it
			 * can not be set without the buf lock.
			 */
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			off = tbp->b_offset;
			tsize = size;
			for (j = 0; tsize > 0; j++) {
				toff = off & PAGE_MASK;
				tinc = tsize;
				if (toff + tinc > PAGE_SIZE)
					tinc = PAGE_SIZE - toff;
				if (vm_page_trysbusy(tbp->b_pages[j]) == 0)
					break;
				if ((tbp->b_pages[j]->valid &
				    vm_page_bits(toff, tinc)) != 0) {
					vm_page_sunbusy(tbp->b_pages[j]);
					break;
				}
				vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
				off += tinc;
				tsize -= tinc;
			}
			if (tsize > 0) {
clean_sbusy:
				vm_object_pip_wakeupn(tbp->b_bufobj->bo_object,
				    j);
				for (k = 0; k < j; k++)
					vm_page_sunbusy(tbp->b_pages[k]);
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				goto clean_sbusy;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;

			m = tbp->b_pages[j];
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if (vm_page_all_valid(m))
				tbp->b_pages[j] = bogus_page;
		}

		/*
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_npages; j++) {
		if (vm_page_all_valid(bp->b_pages[j]))
			bp->b_pages[j] = bogus_page;
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);

	if (buf_mapped(bp)) {
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_pages, bp->b_npages);
	}
	return (bp);
}
445 */ 446 off = tbp->b_offset; 447 tsize = size; 448 for (j = 0; tsize > 0; j++) { 449 toff = off & PAGE_MASK; 450 tinc = tsize; 451 if (toff + tinc > PAGE_SIZE) 452 tinc = PAGE_SIZE - toff; 453 if (vm_page_trysbusy(tbp->b_pages[j]) == 0) 454 break; 455 if ((tbp->b_pages[j]->valid & 456 vm_page_bits(toff, tinc)) != 0) { 457 vm_page_sunbusy(tbp->b_pages[j]); 458 break; 459 } 460 vm_object_pip_add(tbp->b_bufobj->bo_object, 1); 461 off += tinc; 462 tsize -= tinc; 463 } 464 if (tsize > 0) { 465 clean_sbusy: 466 vm_object_pip_wakeupn(tbp->b_bufobj->bo_object, 467 j); 468 for (k = 0; k < j; k++) 469 vm_page_sunbusy(tbp->b_pages[k]); 470 bqrelse(tbp); 471 break; 472 } 473 474 /* 475 * Set a read-ahead mark as appropriate 476 */ 477 if ((fbp && (i == 1)) || (i == (run - 1))) 478 tbp->b_flags |= B_RAM; 479 480 /* 481 * Set the buffer up for an async read (XXX should 482 * we do this only if we do not wind up brelse()ing?). 483 * Set the block number if it isn't set, otherwise 484 * if it is make sure it matches the block number we 485 * expect. 486 */ 487 tbp->b_flags |= B_ASYNC; 488 tbp->b_iocmd = BIO_READ; 489 if (tbp->b_blkno == tbp->b_lblkno) { 490 tbp->b_blkno = bn; 491 } else if (tbp->b_blkno != bn) { 492 goto clean_sbusy; 493 } 494 } 495 /* 496 * XXX fbp from caller may not be B_ASYNC, but we are going 497 * to biodone() it in cluster_callback() anyway 498 */ 499 BUF_KERNPROC(tbp); 500 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, 501 tbp, b_cluster.cluster_entry); 502 for (j = 0; j < tbp->b_npages; j += 1) { 503 vm_page_t m; 504 505 m = tbp->b_pages[j]; 506 if ((bp->b_npages == 0) || 507 (bp->b_pages[bp->b_npages-1] != m)) { 508 bp->b_pages[bp->b_npages] = m; 509 bp->b_npages++; 510 } 511 if (vm_page_all_valid(m)) 512 tbp->b_pages[j] = bogus_page; 513 } 514 515 /* 516 * Don't inherit tbp->b_bufsize as it may be larger due to 517 * a non-page-aligned size. Instead just aggregate using 518 * 'size'. 519 */ 520 if (tbp->b_bcount != size) 521 printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size); 522 if (tbp->b_bufsize != size) 523 printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size); 524 bp->b_bcount += size; 525 bp->b_bufsize += size; 526 } 527 528 /* 529 * Fully valid pages in the cluster are already good and do not need 530 * to be re-read from disk. Replace the page with bogus_page 531 */ 532 for (j = 0; j < bp->b_npages; j++) { 533 if (vm_page_all_valid(bp->b_pages[j])) 534 bp->b_pages[j] = bogus_page; 535 } 536 if (bp->b_bufsize > bp->b_kvasize) 537 panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n", 538 bp->b_bufsize, bp->b_kvasize); 539 540 if (buf_mapped(bp)) { 541 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), 542 (vm_page_t *)bp->b_pages, bp->b_npages); 543 } 544 return (bp); 545 } 546 547 /* 548 * Cleanup after a clustered read or write. 549 * This is complicated by the fact that any of the buffers might have 550 * extra memory (if there were no empty buffer headers at allocbuf time) 551 * that we will need to shift around. 552 */ 553 static void 554 cluster_callback(struct buf *bp) 555 { 556 struct buf *nbp, *tbp; 557 int error = 0; 558 559 /* 560 * Must propagate errors to all the components. 561 */ 562 if (bp->b_ioflags & BIO_ERROR) 563 error = bp->b_error; 564 565 if (buf_mapped(bp)) { 566 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), 567 bp->b_npages); 568 } 569 /* 570 * Move memory from the large cluster buffer into the component 571 * buffers and mark IO as done on these. 

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return(r);
}
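
/*
 * Worked example (hypothetical numbers): with write_behind = 1, a call for
 * start_lbn = 128 and len = 8 builds a cluster over lbns 128..135.  With
 * write_behind = 2 the window is first backed off by len, so the same call
 * covers lbns 120..127, and nothing is written if start_lbn < len.  With
 * write_behind = 0 the call is a no-op and returns 0.
 */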

/*
 * Do clustered write for FFS.
 *
 * Three cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct vn_clusterw *vnc, struct buf *bp,
    u_quad_t filesize, int seqcount, int gbflags)
{
	daddr_t lbn, pbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	if (vp->v_type == VREG) {
		async = DOINGASYNC(vp);
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vnc->v_lasta = vnc->v_clen = vnc->v_cstart = vnc->v_lastw = 0;

	if (vnc->v_clen == 0 || lbn != vnc->v_lastw + 1 ||
	    (bp->b_blkno != vnc->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vnc->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vnc->v_lastw - vnc->v_cstart + 1;
			if ((u_quad_t)bp->b_offset + lblocksize != filesize ||
			    lbn != vnc->v_lastw + 1 || vnc->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vnc->v_cstart, cursize, gbflags);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, vnc, bp,
				    gbflags);
				if (buflist == NULL) {
					/*
					 * Cluster build failed so just write
					 * it now.
					 */
					bawrite(bp);
					return;
				}
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vnc->v_cstart,
						    cursize, gbflags);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vnc->v_lastw = lbn;
					vnc->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if (vp->v_type == VREG &&
		    (u_quad_t) bp->b_offset + lblocksize != filesize &&
		    bp->b_blkno == bp->b_lblkno &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen,
		    NULL) != 0 || bp->b_blkno == -1)) {
			pbn = bp->b_blkno;
			bawrite(bp);
			vnc->v_clen = 0;
			vnc->v_lasta = pbn;
			vnc->v_cstart = lbn + 1;
			vnc->v_lastw = lbn;
			return;
		}
		vnc->v_clen = maxclen;
		pbn = bp->b_blkno;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vnc->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vnc->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vnc->v_cstart + vnc->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		pbn = bp->b_blkno;
		bdwrite(bp);
		if (seqcount > 1) {
			cluster_wbuild_wb(vp, lblocksize, vnc->v_cstart,
			    vnc->v_clen + 1, gbflags);
		}
		vnc->v_clen = 0;
		vnc->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		pbn = bp->b_blkno;
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		pbn = bp->b_blkno;
		bdwrite(bp);
	}
	vnc->v_lastw = lbn;
	vnc->v_lasta = pbn;
}
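
/*
 * Illustrative caller sketch (hypothetical locals ip, fs, xfersize and
 * blkoffset): a filesystem's VOP_WRITE path hands each fully written
 * logical block to cluster_write() and lets it choose between delaying,
 * pushing, or clustering the I/O, roughly:
 *
 *	if (ioflag & IO_SYNC)
 *		bwrite(bp);
 *	else if (xfersize + blkoffset == fs->fs_bsize)
 *		cluster_write(vp, &ip->i_clusterw, bp, ip->i_size,
 *		    seqcount, GB_UNMAPPED);
 *	else
 *		bdwrite(bp);
 *
 * See ffs_write() in ufs/ffs/ffs_vnops.c for one real caller.
 */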

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	struct buf *bp, *tbp;
	struct bufobj *bo;
	int i, j;
	int totalwritten = 0;
	int dbsize = btodb(size);

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	bo = &vp->v_bufobj;
	while (len > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		BO_LOCK(bo);
		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			BO_UNLOCK(bo);
			++start_lbn;
			--len;
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
			++start_lbn;
			--len;
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}
		MPASS((bp->b_flags & B_MAXPHYS) != 0);

		/*
		 * We got a pbuf to make the cluster in, so initialise it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		if ((gbflags & GB_UNMAPPED) == 0 ||
		    (tbp->b_flags & B_VMIO) == 0) {
			bp->b_data = (char *)((vm_offset_t)bp->b_data |
			    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		} else {
			bp->b_data = unmapped_buf;
		}
		bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
		    B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				BO_LOCK(bo);
				if ((tbp = gbincore(bo, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					BO_UNLOCK(bo);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    BO_LOCKPTR(bo)))
					break;

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
			} /* end of code for non-first buffers only */
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i == 0) {
					vfs_busy_pages_acquire(tbp);
				} else { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (vm_page_trysbusy(m) == 0) {
							for (j--; j >= 0; j--)
								vm_page_sunbusy(
								    tbp->b_pages[j]);
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				vm_object_pip_add(tbp->b_bufobj->bo_object,
				    tbp->b_npages);
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;
			/*
			 * If any of the clustered buffers have their
			 * B_BARRIER flag set, transfer that request to
			 * the cluster.
			 */
			bp->b_flags |= (tbp->b_flags & B_BARRIER);
			tbp->b_flags &= ~(B_DONE | B_BARRIER);
			tbp->b_flags |= B_ASYNC;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_iocmd = BIO_WRITE;
			bundirty(tbp);
			reassignbuf(tbp);		/* put on clean list */
			bufobj_wref(tbp->b_bufobj);
			BUF_KERNPROC(tbp);
			buf_track(tbp, __func__);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		if (buf_mapped(bp)) {
			pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			    (vm_page_t *)bp->b_pages, bp->b_npages);
		}
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct vn_clusterw *vnc,
    struct buf *last_bp, int gbflags)
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, j, len, error;

	len = vnc->v_lastw - vnc->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vnc->v_cstart, i = 0; i < len; lbn++, i++) {
		error = bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
		    gbflags, &bp);
		if (error != 0) {
			/*
			 * If read fails, release collected buffers
			 * and return failure.
			 */
			for (j = 0; j < i; j++)
				brelse(buflist->bs_children[j]);
			free(buflist, M_SEGMENT);
			return (NULL);
		}
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}

void
cluster_init_vn(struct vn_clusterw *vnc)
{
	vnc->v_lasta = 0;
	vnc->v_clen = 0;
	vnc->v_cstart = 0;
	vnc->v_lastw = 0;
}
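
/*
 * Usage sketch (hypothetical per-inode field i_clusterw): a filesystem that
 * wants write clustering keeps one struct vn_clusterw per vnode it writes
 * through, initialises it with cluster_init_vn() when the in-memory inode
 * is set up, and passes it to cluster_write() for every full block written:
 *
 *	cluster_init_vn(&ip->i_clusterw);
 *	...
 *	cluster_write(vp, &ip->i_clusterw, bp, ip->i_size, seqcount, gbflags);
 */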