/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * Copyright (c) 1994,1997 John S. Dyson
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 *
 * see man buf(9) for more info.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <geom/geom.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include "opt_compat.h"
#include "opt_directio.h"
#include "opt_swap.h"

static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");

struct bio_ops bioops;		/* I/O operation notification */

struct buf_ops buf_ops_bio = {
	.bop_name	=	"buf_ops_bio",
	.bop_write	=	bufwrite,
	.bop_strategy	=	bufstrategy,
	.bop_sync	=	bufsync,
	.bop_bdflush	=	bufbdflush,
};

/*
 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
 * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
 */
struct buf *buf;		/* buffer header pool */
caddr_t unmapped_buf;

static struct proc *bufdaemonproc;

static int inmem(struct vnode *vp, daddr_t blkno);
static void vm_hold_free_pages(struct buf *bp, int newbsize);
static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
		vm_page_t m);
static void vfs_clean_pages_dirty_buf(struct buf *bp);
static void vfs_setdirty_locked_object(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static int vfs_bio_clcheck(struct vnode *vp, int size,
		daddr_t lblkno, daddr_t blkno);
static int buf_flush(struct vnode *vp, int);
static int flushbufqueues(struct vnode *, int, int);
static void buf_daemon(void);
static void bremfreel(struct buf *bp);
static __inline void bd_wakeup(void);
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
#endif

int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
    "Use the VM system for directory writes");
long runningbufspace;
SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
    "Amount of presently outstanding async buffer io");
static long bufspace;
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
    &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
#else
SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
    "Virtual memory used for buffers");
#endif
static long unmapped_bufspace;
SYSCTL_LONG(_vfs, OID_AUTO, unmapped_bufspace, CTLFLAG_RD,
    &unmapped_bufspace, 0,
    "Amount of unmapped buffers, inclusive in the bufspace");
static long maxbufspace;
SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
    "Maximum allowed value of bufspace (including buf_daemon)");
static long bufmallocspace;
SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
    "Amount of malloced memory for buffers");
static long maxbufmallocspace;
SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
    "Maximum amount of malloced memory for buffers");
static long lobufspace;
SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
    "Minimum amount of buffers we want to have");
long hibufspace;
SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
    "Maximum allowed value of bufspace (excluding buf_daemon)");
static int bufreusecnt;
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
    "Number of times we have reused a buffer");
static int buffreekvacnt;
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
    "Number of times we have freed the KVA space from some buffer");
static int bufdefragcnt;
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
    "Number of times we have had to repeat buffer allocation to defragment");
static long lorunningspace;
SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW,
    &lorunningspace, 0,
    "Minimum preferred space used for in-progress I/O");
static long hirunningspace;
SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
    "Maximum amount of space to use for in-progress I/O");
int dirtybufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
int bdwriteskip;
SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
int altbufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
    0, "Number of fsync flushes to limit dirty buffers");
static int recursiveflushes;
SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
    0, "Number of flushes skipped due to being recursive");
static int numdirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
    "Number of buffers that are dirty (has unwritten changes) at the moment");
static int lodirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
    "How many buffers we want to have free before bufdaemon can sleep");
static int hidirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
    "When the number of dirty buffers is considered severe");
int dirtybufthresh;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
static int numfreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
    "Number of free buffers");
static int lofreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
    "XXX Unused");
static int hifreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
    "XXX Complicatedly unused");
static int getnewbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
    "Number of calls to getnewbuf");
static int getnewbufrestarts;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
    "Number of times getnewbuf has had to restart a buffer acquisition");
static int mappingrestarts;
SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
    "Number of times getblk has had to restart a buffer mapping for "
    "unmapped buffer");
static int flushbufqtarget = 100;
SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
    "Amount of work to do in flushbufqueues when helping bufdaemon");
static long notbufdflushes;
SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0,
    "Number of dirty buffer flushes done by the bufdaemon helpers");
static long barrierwrites;
SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
    "Number of barrier writes");
SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
    &unmapped_buf_allowed, 0,
    "Permit the use of the unmapped i/o");

/*
 * Lock for the non-dirty bufqueues
 */
static struct mtx_padalign bqclean;

/*
 * Lock for the dirty queue.
 */
static struct mtx_padalign bqdirty;

/*
 * This lock synchronizes access to bd_request.
 */
static struct mtx_padalign bdlock;

/*
 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
 * waitrunningbufspace().
 */
static struct mtx_padalign rbreqlock;

/*
 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
 */
static struct mtx_padalign nblock;

/*
 * Lock that protects bdirtywait.
 */
static struct mtx_padalign bdirtylock;

/*
 * Wakeup point for bufdaemon, as well as indicator of whether it is already
 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
 * is idling.
 */
static int bd_request;

/*
 * Request for the buf daemon to write more buffers than is indicated by
 * lodirtybuffers.  This may be necessary to push out excess dependencies or
 * defragment the address space where a simple count of the number of dirty
 * buffers is insufficient to characterize the demand for flushing them.
 */
static int bd_speedupreq;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * Synchronization (sleep/wakeup) variable for active buffer space requests.
 * Set when wait starts, cleared prior to wakeup().
 * Used in runningbufwakeup() and waitrunningbufspace().
 */
static int runningbufreq;

/*
 * Synchronization (sleep/wakeup) variable for buffer requests.
 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
 * by and/or.
 * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(),
 * getnewbuf(), and getblk().
 */
static int needsbuffer;

/*
 * Synchronization for bwillwrite() waiters.
 */
static int bdirtywait;

/*
 * Definitions for the buffer free lists.
 */
#define BUFFER_QUEUES	5	/* number of free buffer queues */

#define QUEUE_NONE	0	/* on no queue */
#define QUEUE_CLEAN	1	/* non-B_DELWRI buffers */
#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
#define QUEUE_EMPTYKVA	3	/* empty buffer headers w/KVA assignment */
#define QUEUE_EMPTY	4	/* empty buffer headers */
#define QUEUE_SENTINEL	1024	/* not a queue index, but mark for sentinel */

/* Queues for free buffers with various properties */
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
#ifdef INVARIANTS
static int bq_len[BUFFER_QUEUES];
#endif
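
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the free lists above are plain TAILQs protected by the queue locks
 * declared earlier, so walking one looks roughly like this, e.g. from a
 * hypothetical debugging helper:
 *
 *	struct buf *bp;
 *
 *	mtx_lock(&bqclean);
 *	TAILQ_FOREACH(bp, &bufqueues[QUEUE_CLEAN], b_freelist)
 *		printf("clean buf %p lblkno %jd\n", bp, (intmax_t)bp->b_lblkno);
 *	mtx_unlock(&bqclean);
 */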

/*
 * Single global constant for BUF_WMESG, to avoid getting multiple references.
 * buf_wmesg is referred from macros.
 */
const char *buf_wmesg = BUF_WMESG;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
static int
sysctl_bufspace(SYSCTL_HANDLER_ARGS)
{
	long lvalue;
	int ivalue;

	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
		return (sysctl_handle_long(oidp, arg1, arg2, req));
	lvalue = *(long *)arg1;
	if (lvalue > INT_MAX)
		/* On overflow, still write out a long to trigger ENOMEM. */
		return (sysctl_handle_long(oidp, &lvalue, 0, req));
	ivalue = lvalue;
	return (sysctl_handle_int(oidp, &ivalue, 0, req));
}
#endif

#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */

/*
 * bqlock:
 *
 *	Return the appropriate queue lock based on the index.
 */
static inline struct mtx *
bqlock(int qindex)
{

	if (qindex == QUEUE_DIRTY)
		return (struct mtx *)(&bqdirty);
	return (struct mtx *)(&bqclean);
}

/*
 * bdirtywakeup:
 *
 *	Wakeup any bwillwrite() waiters.
 */
static void
bdirtywakeup(void)
{
	mtx_lock(&bdirtylock);
	if (bdirtywait) {
		bdirtywait = 0;
		wakeup(&bdirtywait);
	}
	mtx_unlock(&bdirtylock);
}

/*
 * bdirtysub:
 *
 *	Decrement the numdirtybuffers count by one and wakeup any
 *	threads blocked in bwillwrite().
 */
static void
bdirtysub(void)
{

	if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
	    (lodirtybuffers + hidirtybuffers) / 2)
		bdirtywakeup();
}

/*
 * bdirtyadd:
 *
 *	Increment the numdirtybuffers count by one and wakeup the buf
 *	daemon if needed.
 */
static void
bdirtyadd(void)
{

	/*
	 * Only do the wakeup once as we cross the boundary.  The
	 * buf daemon will keep running until the condition clears.
	 */
	if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
	    (lodirtybuffers + hidirtybuffers) / 2)
		bd_wakeup();
}

/*
 * bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.  Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */
static __inline void
bufspacewakeup(void)
{

	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	mtx_lock(&nblock);
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
	}
	mtx_unlock(&nblock);
}

/*
 * runningwakeup:
 *
 *	Wake up processes that are waiting on asynchronous writes to fall
 *	below lorunningspace.
 */
static void
runningwakeup(void)
{

	mtx_lock(&rbreqlock);
	if (runningbufreq) {
		runningbufreq = 0;
		wakeup(&runningbufreq);
	}
	mtx_unlock(&rbreqlock);
}

/*
 * runningbufwakeup:
 *
 *	Decrement the outstanding write count accordingly.
 */
void
runningbufwakeup(struct buf *bp)
{
	long space, bspace;

	bspace = bp->b_runningbufspace;
	if (bspace == 0)
		return;
	space = atomic_fetchadd_long(&runningbufspace, -bspace);
	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
	    space, bspace));
	bp->b_runningbufspace = 0;
	/*
	 * Only acquire the lock and wakeup on the transition from exceeding
	 * the threshold to falling below it.
	 */
	if (space < lorunningspace)
		return;
	if (space - bspace > lorunningspace)
		return;
	runningwakeup();
}

/*
 * bufcountadd:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */
static __inline void
bufcountadd(struct buf *bp)
{
	int old;

	KASSERT((bp->b_flags & B_INFREECNT) == 0,
	    ("buf %p already counted as free", bp));
	bp->b_flags |= B_INFREECNT;
	old = atomic_fetchadd_int(&numfreebuffers, 1);
	KASSERT(old >= 0 && old < nbuf,
	    ("numfreebuffers climbed to %d", old + 1));
	mtx_lock(&nblock);
	if (needsbuffer) {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		if (numfreebuffers >= hifreebuffers)
			needsbuffer &= ~VFS_BIO_NEED_FREE;
		wakeup(&needsbuffer);
	}
	mtx_unlock(&nblock);
}

/*
 * bufcountsub:
 *
 *	Decrement the numfreebuffers count as needed.
 */
static void
bufcountsub(struct buf *bp)
{
	int old;

	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, the buffer was free and we must decrement
	 * numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		KASSERT((bp->b_flags & B_INFREECNT) != 0,
		    ("buf %p not counted in numfreebuffers", bp));
		bp->b_flags &= ~B_INFREECNT;
		old = atomic_fetchadd_int(&numfreebuffers, -1);
		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
	}
}

/*
 * waitrunningbufspace()
 *
 *	runningbufspace is a measure of the amount of I/O currently
 *	running.  This routine is used in async-write situations to
 *	prevent creating huge backups of pending writes to a device.
 *	Only asynchronous writes are governed by this function.
 *
 *	This does NOT turn an async write into a sync write.  It waits
 *	for earlier writes to complete and generally returns before the
 *	caller's write has reached the device.
 */
void
waitrunningbufspace(void)
{

	mtx_lock(&rbreqlock);
	while (runningbufspace > hirunningspace) {
		runningbufreq = 1;
		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
	}
	mtx_unlock(&rbreqlock);
}

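/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a producer issuing a burst of asynchronous writes can use the routine
 * above to pace itself against hirunningspace, roughly
 *
 *	for (i = 0; i < n; i++) {
 *		bawrite(bps[i]);
 *		waitrunningbufspace();
 *	}
 *
 * where bps[] is a hypothetical array of dirty, locked buffers.
 */
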
/*
 * vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline
void
vfs_buf_test_cache(struct buf *bp,
		vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		vm_page_t m)
{

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/* Wake up the buffer daemon if necessary */
static __inline void
bd_wakeup(void)
{

	mtx_lock(&bdlock);
	if (bd_request == 0) {
		bd_request = 1;
		wakeup(&bd_request);
	}
	mtx_unlock(&bdlock);
}

/*
 * bd_speedup - speedup the buffer cache flushing code
 */
void
bd_speedup(void)
{
	int needwake;

	mtx_lock(&bdlock);
	needwake = 0;
	if (bd_speedupreq == 0 || bd_request == 0)
		needwake = 1;
	bd_speedupreq = 1;
	bd_request = 1;
	if (needwake)
		wakeup(&bd_request);
	mtx_unlock(&bdlock);
}

#ifdef __i386__
#define	TRANSIENT_DENOM	5
#else
#define	TRANSIENT_DENOM	10
#endif

/*
 * Calculating buffer cache scaling values and reserve space for buffer
 * headers.  This is called during low level kernel initialization and
 * may be called more than once.  We CANNOT write to the memory area
 * being reserved at this time.
 */
caddr_t
kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
{
	int tuned_nbuf;
	long maxbuf, maxbuf_sz, buf_sz, biotmap_sz;

	/*
	 * physmem_est is in pages.  Convert it to kilobytes (assumes
	 * PAGE_SIZE is >= 1K)
	 */
	physmem_est = physmem_est * (PAGE_SIZE / 1024);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / 1024;

		nbuf = 50;
		if (physmem_est > 4096)
			nbuf += min((physmem_est - 4096) / factor,
			    65536 / factor);
		if (physmem_est > 65536)
			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
			    32 * 1024 * 1024 / (factor * 5));

		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;
		tuned_nbuf = 1;
	} else
		tuned_nbuf = 0;

	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
	maxbuf = (LONG_MAX / 3) / BKVASIZE;
	if (nbuf > maxbuf) {
		if (!tuned_nbuf)
			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
			    maxbuf);
		nbuf = maxbuf;
	}

	/*
	 * Ideal allocation size for the transient bio submap is 10%
	 * of the maximal space buffer map.  This roughly corresponds
	 * to the amount of the buffer mapped for typical UFS load.
	 *
	 * Clip the buffer map to reserve space for the transient
	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
	 * maximum buffer map extent on the platform.
	 *
	 * The fall-back to maxbuf in case maxbcache is unset allows us
	 * to not trim the buffer KVA on architectures with ample KVA
	 * space.
	 */
	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
		maxbuf_sz = maxbcache != 0 ?
		    maxbcache : maxbuf * BKVASIZE;
		buf_sz = (long)nbuf * BKVASIZE;
		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
		    (TRANSIENT_DENOM - 1)) {
			/*
			 * There is more KVA than memory.  Do not
			 * adjust buffer map size, and assign the rest
			 * of maxbuf to transient map.
			 */
			biotmap_sz = maxbuf_sz - buf_sz;
		} else {
			/*
			 * Buffer map spans all KVA we could afford on
			 * this platform.  Give 10% (20% on i386) of
			 * the buffer map to the transient bio map.
			 */
			biotmap_sz = buf_sz / TRANSIENT_DENOM;
			buf_sz -= biotmap_sz;
		}
		if (biotmap_sz / INT_MAX > MAXPHYS)
			bio_transient_maxcnt = INT_MAX;
		else
			bio_transient_maxcnt = biotmap_sz / MAXPHYS;
		/*
		 * Artificially limit to 1024 simultaneous in-flight I/Os
		 * using the transient mapping.
		 */
		if (bio_transient_maxcnt > 1024)
			bio_transient_maxcnt = 1024;
		if (tuned_nbuf)
			nbuf = buf_sz / BKVASIZE;
	}

	/*
	 * swbufs are used as temporary holders for I/O, such as paging I/O.
	 * We have no fewer than 16 and no more than 256.
	 */
	nswbuf = max(min(nbuf/4, 256), 16);
#ifdef NSWBUF_MIN
	if (nswbuf < NSWBUF_MIN)
		nswbuf = NSWBUF_MIN;
#endif
#ifdef DIRECTIO
	ffs_rawread_setup();
#endif

	/*
	 * Reserve space for the buffer cache buffers
	 */
	swbuf = (void *)v;
	v = (caddr_t)(swbuf + nswbuf);
	buf = (void *)v;
	v = (caddr_t)(buf + nbuf);

	return (v);
}

/* Initialize the buffer subsystem.  Called before use of any buffers. */
void
bufinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF);
	mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF);
	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL | B_INFREECNT;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
#ifdef INVARIANTS
		bq_len[QUEUE_EMPTY]++;
#endif
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
	 * this may result in KVM fragmentation which is not handled optimally
	 * by the system.
	 */
	maxbufspace = (long)nbuf * BKVASIZE;
	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace - MAXBSIZE;

	/*
	 * Note: The 16 MiB upper limit for hirunningspace was chosen
	 * arbitrarily and may need further tuning.  It corresponds to
	 * 128 outstanding write IO requests (if IO size is 128 KiB),
	 * which fits with many RAID controllers' tagged queuing limits.
	 * The lower 1 MiB limit is the historical upper limit for
	 * hirunningspace.
	 */
	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBSIZE),
	    16 * 1024 * 1024), 1024 * 1024);
	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBSIZE);

	/*
	 * Limit the amount of malloc memory since it is wired permanently
	 * into the kernel space.  Even though this is accounted for in the
	 * buffer allocation, we don't want the malloced region to grow
	 * uncontrolled.  The malloc scheme improves memory utilization
	 * significantly on average (small) directories.
	 */
	maxbufmallocspace = hibufspace / 20;

	/*
	 * Reduce the chance of a deadlock occurring by limiting the number
	 * of delayed-write dirty buffers we allow to stack up.
	 */
	hidirtybuffers = nbuf / 4 + 20;
	dirtybufthresh = hidirtybuffers * 9 / 10;
	numdirtybuffers = 0;
	/*
	 * To support extreme low-memory systems, make sure hidirtybuffers
	 * cannot eat up all available buffer space.  This occurs when our
	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
	 * buffer space assuming BKVASIZE'd buffers.
	 */
	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
		hidirtybuffers >>= 1;
	}
	lodirtybuffers = hidirtybuffers / 2;

	/*
	 * Try to keep the number of free buffers in the specified range,
	 * and give special processes (e.g. like buf_daemon) access to an
	 * emergency reserve.
	 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
}

#ifdef INVARIANTS
static inline void
vfs_buf_check_mapped(struct buf *bp)
{

	KASSERT((bp->b_flags & B_UNMAPPED) == 0,
	    ("mapped buf %p %x", bp, bp->b_flags));
	KASSERT(bp->b_kvabase != unmapped_buf,
	    ("mapped buf: b_kvabase was not updated %p", bp));
	KASSERT(bp->b_data != unmapped_buf,
	    ("mapped buf: b_data was not updated %p", bp));
}

static inline void
vfs_buf_check_unmapped(struct buf *bp)
{

	KASSERT((bp->b_flags & B_UNMAPPED) == B_UNMAPPED,
	    ("unmapped buf %p %x", bp, bp->b_flags));
	KASSERT(bp->b_kvabase == unmapped_buf,
	    ("unmapped buf: corrupted b_kvabase %p", bp));
	KASSERT(bp->b_data == unmapped_buf,
	    ("unmapped buf: corrupted b_data %p", bp));
}

#define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
#define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
#else
#define	BUF_CHECK_MAPPED(bp) do {} while (0)
#define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
#endif

static void
bpmap_qenter(struct buf *bp)
{

	BUF_CHECK_MAPPED(bp);

	/*
	 * bp->b_data is relative to bp->b_offset, but
	 * bp->b_offset may be offset into the first page.
	 */
	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
}

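/*
 * Illustrative sketch (editorial example, not part of the original file):
 * with unmapped_buf_allowed set, a consumer that does not need a KVA
 * mapping can ask getblk() for an unmapped buffer; its data is then only
 * reachable through b_pages[], not b_data:
 *
 *	bp = getblk(vp, lbn, size, 0, 0, GB_UNMAPPED);
 *	if ((bp->b_flags & B_UNMAPPED) != 0)
 *		... operate on bp->b_pages[] ...
 *	brelse(bp);
 */
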
/*
 * bfreekva() - free the kva allocation for a buffer.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
static void
bfreekva(struct buf *bp)
{

	if (bp->b_kvasize == 0)
		return;

	atomic_add_int(&buffreekvacnt, 1);
	atomic_subtract_long(&bufspace, bp->b_kvasize);
	if ((bp->b_flags & B_UNMAPPED) == 0) {
		BUF_CHECK_MAPPED(bp);
		vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase,
		    bp->b_kvasize);
	} else {
		BUF_CHECK_UNMAPPED(bp);
		if ((bp->b_flags & B_KVAALLOC) != 0) {
			vmem_free(buffer_arena, (vm_offset_t)bp->b_kvaalloc,
			    bp->b_kvasize);
		}
		atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize);
		bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC);
	}
	bp->b_kvasize = 0;
	bufspacewakeup();
}

/*
 * binsfree:
 *
 *	Insert the buffer into the appropriate free list.
 */
static void
binsfree(struct buf *bp, int qindex)
{
	struct mtx *olock, *nlock;

	BUF_ASSERT_XLOCKED(bp);

	olock = bqlock(bp->b_qindex);
	nlock = bqlock(qindex);
	mtx_lock(olock);
	/* Handle delayed bremfree() processing. */
	if (bp->b_flags & B_REMFREE)
		bremfreel(bp);

	if (bp->b_qindex != QUEUE_NONE)
		panic("binsfree: free buffer onto another queue???");

	bp->b_qindex = qindex;
	if (olock != nlock) {
		mtx_unlock(olock);
		mtx_lock(nlock);
	}
	if (bp->b_flags & B_AGE)
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
	else
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
#ifdef INVARIANTS
	bq_len[bp->b_qindex]++;
#endif
	mtx_unlock(nlock);

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
		bufcountadd(bp);
}

/*
 * bremfree:
 *
 *	Mark the buffer for removal from the appropriate free list.
 *
 */
void
bremfree(struct buf *bp)
{

	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT((bp->b_flags & B_REMFREE) == 0,
	    ("bremfree: buffer %p already marked for delayed removal.", bp));
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfree: buffer %p not on a queue.", bp));
	BUF_ASSERT_XLOCKED(bp);

	bp->b_flags |= B_REMFREE;
	bufcountsub(bp);
}

/*
 * bremfreef:
 *
 *	Force an immediate removal from a free list.  Used only in nfs when
 *	it abuses the b_freelist pointer.
 */
void
bremfreef(struct buf *bp)
{
	struct mtx *qlock;

	qlock = bqlock(bp->b_qindex);
	mtx_lock(qlock);
	bremfreel(bp);
	mtx_unlock(qlock);
}

/*
 * bremfreel:
 *
 *	Removes a buffer from the free list, must be called with the
 *	correct qlock held.
 */
static void
bremfreel(struct buf *bp)
{

	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfreel: buffer %p not on a queue.", bp));
	BUF_ASSERT_XLOCKED(bp);
	mtx_assert(bqlock(bp->b_qindex), MA_OWNED);

	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
#ifdef INVARIANTS
	KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
	    bp->b_qindex));
	bq_len[bp->b_qindex]--;
#endif
	bp->b_qindex = QUEUE_NONE;
	/*
	 * If this was a delayed bremfree() we only need to remove the buffer
	 * from the queue and return; the stats have already been updated.
	 */
	if (bp->b_flags & B_REMFREE) {
		bp->b_flags &= ~B_REMFREE;
		return;
	}
	bufcountsub(bp);
}

/*
 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
 * the buffer is valid and we do not have to do anything.
 */
void
breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
    int cnt, struct ucred * cred)
{
	struct buf *rabp;
	int i;

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (!TD_IS_IDLETHREAD(curthread))
				curthread->td_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED && cred != NOCRED)
				rabp->b_rcred = crhold(cred);
			vfs_busy_pages(rabp, 0);
			BUF_KERNPROC(rabp);
			rabp->b_iooffset = dbtob(rabp->b_blkno);
			bstrategy(rabp);
		} else {
			brelse(rabp);
		}
	}
}

/*
 * Entry point for bread() and breadn() via #defines in sys/buf.h.
 *
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything, see
 * getblk(). Also starts asynchronous I/O on read-ahead blocks.
 */
int
breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
    int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
{
	struct buf *bp;
	int rv = 0, readwait = 0;

	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
	/*
	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
	 */
	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
	if (bp == NULL)
		return (EBUSY);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (!TD_IS_IDLETHREAD(curthread))
			curthread->td_ru.ru_inblock++;
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED && cred != NOCRED)
			bp->b_rcred = crhold(cred);
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		++readwait;
	}

	breada(vp, rablkno, rabsize, cnt, cred);

	if (readwait) {
		rv = bufwait(bp);
	}
	return (rv);
}

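/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the usual consumer pattern for the entry points above, as seen from a
 * filesystem, is
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	bqrelse(bp);
 *
 * where lbn and size are the caller's logical block number and block size.
 */
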
/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */
int
bufwrite(struct buf *bp)
{
	int oldflags;
	struct vnode *vp;
	long space;
	int vp_md;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (bp->b_flags & B_BARRIER)
		barrierwrites++;

	oldflags = bp->b_flags;

	BUF_ASSERT_HELD(bp);

	if (bp->b_pin_count > 0)
		bunpin_wait(bp);

	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
	    ("FFS background buffer should not get here %p", bp));

	vp = bp->b_vp;
	if (vp)
		vp_md = vp->v_vflag & VV_MD;
	else
		vp_md = 0;

	/*
	 * Mark the buffer clean.  Increment the bufobj write count
	 * before bundirty() call, to prevent other thread from seeing
	 * empty dirty list and zero counter for writes in progress,
	 * falsely indicating that the bufobj is clean.
	 */
	bufobj_wref(bp->b_bufobj);
	bundirty(bp);

	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_CACHE;
	bp->b_iocmd = BIO_WRITE;

	vfs_busy_pages(bp, 1);

	/*
	 * Normal bwrites pipeline writes
	 */
	bp->b_runningbufspace = bp->b_bufsize;
	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);

	if (!TD_IS_IDLETHREAD(curthread))
		curthread->td_ru.ru_oublock++;
	if (oldflags & B_ASYNC)
		BUF_KERNPROC(bp);
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = bufwait(bp);
		brelse(bp);
		return (rtval);
	} else if (space > hirunningspace) {
		/*
		 * don't allow the async write to saturate the I/O
		 * system.  We will not deadlock here because
		 * we are blocking waiting for I/O that is already in-progress
		 * to complete. We do not block here if it is the update
		 * or syncer daemon trying to clean up as that can lead
		 * to deadlock.
		 */
		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
			waitrunningbufspace();
	}

	return (0);
}

void
bufbdflush(struct bufobj *bo, struct buf *bp)
{
	struct buf *nbp;

	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
		altbufferflushes++;
	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
		BO_LOCK(bo);
		/*
		 * Try to find a buffer to flush.
		 */
		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
			    BUF_LOCK(nbp,
			    LK_EXCLUSIVE | LK_NOWAIT, NULL))
				continue;
			if (bp == nbp)
				panic("bdwrite: found ourselves");
			BO_UNLOCK(bo);
			/* Don't countdeps with the bo lock held. */
			if (buf_countdeps(nbp, 0)) {
				BO_LOCK(bo);
				BUF_UNLOCK(nbp);
				continue;
			}
			if (nbp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(nbp);
			} else {
				bremfree(nbp);
				bawrite(nbp);
			}
			dirtybufferflushes++;
			break;
		}
		if (nbp == NULL)
			BO_UNLOCK(bo);
	}
}

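/*
 * Illustrative sketch (editorial example, not part of the original file):
 * after modifying a block obtained with bread(), a filesystem picks
 * between the synchronous and the delayed write path; both release the
 * buffer on behalf of the caller:
 *
 *	... modify bp->b_data ...
 *	if (wait)
 *		error = bwrite(bp);	-- write now, wait for completion
 *	else
 *		bdwrite(bp);		-- mark dirty, write later
 */
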
/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf *bp)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct bufobj *bo;

	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT((bp->b_flags & B_BARRIER) == 0,
	    ("Barrier request in delayed write %p", bp));
	BUF_ASSERT_HELD(bp);

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}

	/*
	 * If we have too many dirty buffers, don't create any more.
	 * If we are wildly over our limit, then force a complete
	 * cleanup. Otherwise, just keep the situation from getting
	 * out of control.  Note that we have to avoid a recursive
	 * disaster and not try to clean up after our own cleanup!
	 */
	vp = bp->b_vp;
	bo = bp->b_bufobj;
	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
		td->td_pflags |= TDP_INBDFLUSH;
		BO_BDFLUSH(bo, bp);
		td->td_pflags &= ~TDP_INBDFLUSH;
	} else
		recursiveflushes++;

	bdirty(bp);
	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure that the filesystem needs is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout daemon
	 * is requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty
	 * pages.
	 *
	 * Mark the buffer pages as clean.  We need to do this here to
	 * satisfy the vnode_pager and the pageout daemon, so that it
	 * thinks that the pages have been "cleaned".  Note that since
	 * the pages are in a delayed write buffer -- the VFS layer
	 * "will" see that the pages get written out on the next sync,
	 * or perhaps the cluster will be completed.
	 */
	vfs_clean_pages_dirty_buf(bp);
	bqrelse(bp);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}

/*
 * bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear BIO_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(struct buf *bp)
{

	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
		reassignbuf(bp);
		bdirtyadd();
	}
}

/*
 * bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	The buffer must be on QUEUE_NONE.
 */

void
bundirty(struct buf *bp)
{

	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp);
		bdirtysub();
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 * bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf *bp)
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 * babarrierwrite:
 *
 *	Asynchronous barrier write.  Start output on a buffer, but do not
 *	wait for it to complete.  Place a write barrier after this write so
 *	that this buffer and all buffers written before it are committed to
 *	the disk before any buffers written after this write are committed
 *	to the disk.  The buffer is released when the output completes.
 */
void
babarrierwrite(struct buf *bp)
{

	bp->b_flags |= B_ASYNC | B_BARRIER;
	(void) bwrite(bp);
}

/*
 * bbarrierwrite:
 *
 *	Synchronous barrier write.  Start output on a buffer and wait for
 *	it to complete.  Place a write barrier after this write so that
 *	this buffer and all buffers written before it are committed to
 *	the disk before any buffers written after this write are committed
 *	to the disk.  The buffer is released when the output completes.
 */
int
bbarrierwrite(struct buf *bp)
{

	bp->b_flags |= B_BARRIER;
	return (bwrite(bp));
}

/*
 * bwillwrite:
 *
 *	Called prior to the locking of any vnodes when we are expecting to
 *	write.  We do not want to starve the buffer cache with too many
 *	dirty buffers so we block here.  By blocking prior to the locking
 *	of any vnodes we attempt to avoid the situation where a locked vnode
 *	prevents the various system daemons from flushing related buffers.
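 *
 *	Illustrative sketch (editorial example, not part of the original
 *	comment): a write(2)-style path is expected to call this before
 *	locking the vnode, roughly
 *
 *		bwillwrite();
 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *		error = VOP_WRITE(vp, uio, ioflag, cred);
 *		VOP_UNLOCK(vp, 0);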
 */
void
bwillwrite(void)
{

	if (numdirtybuffers >= hidirtybuffers) {
		mtx_lock(&bdirtylock);
		while (numdirtybuffers >= hidirtybuffers) {
			bdirtywait = 1;
			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
			    "flswai", 0);
		}
		mtx_unlock(&bdirtylock);
	}
}

/*
 * Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{

	return (numdirtybuffers >= hidirtybuffers);
}

static __noinline int
buf_vm_page_count_severe(void)
{

	KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1);

	return vm_page_count_severe();
}

/*
 * brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf *bp)
{
	int qindex;

	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (BUF_LOCKRECURSED(bp)) {
		/*
		 * Do not process, in particular, do not handle the
		 * B_INVAL/B_RELBUF and do not release to free list.
		 */
		BUF_UNLOCK(bp);
		return;
	}

	if (bp->b_flags & B_MANAGED) {
		bqrelse(bp);
		return;
	}

	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
		/*
		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
		 * pages from being scrapped.  If the error is anything
		 * other than an I/O error (EIO), assume that retrying
		 * is futile.
		 */
		bp->b_ioflags &= ~BIO_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (!LIST_EMPTY(&bp->b_dep))
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI)
			bdirtysub();
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;
	else if (buf_vm_page_count_severe()) {
		/*
		 * BKGRDINPROG can only be set with the buf and bufobj
		 * locks both held.  We tolerate a race to clear it here.
		 */
		if (!(bp->b_vflags & BV_BKGRDINPROG))
			bp->b_flags |= B_RELBUF;
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer.  If the
	 * buffer has a background write in progress, we need to keep it
	 * around to prevent it from being reconstituted and starting a second
	 * background write.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_mount != NULL &&
		(bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
		!vn_isdisk(bp->b_vp, NULL) &&
		(bp->b_flags & B_DELWRI))
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;

		obj = bp->b_bufobj->bo_object;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then b_data pointer may not be page-aligned.
		 * But our b_pages[] array *IS* page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_offset;
		for (i = 0; i < bp->b_npages; i++) {
			int had_bogus = 0;

			m = bp->b_pages[i];

			/*
			 * If we hit a bogus page, fixup *all* the bogus pages
			 * now.
			 */
			if (m == bogus_page) {
				poff = OFF_TO_IDX(bp->b_offset);
				had_bogus = 1;

				VM_OBJECT_RLOCK(obj);
				for (j = i; j < bp->b_npages; j++) {
					vm_page_t mtmp;
					mtmp = bp->b_pages[j];
					if (mtmp == bogus_page) {
						mtmp = vm_page_lookup(obj, poff + j);
						if (!mtmp) {
							panic("brelse: page missing\n");
						}
						bp->b_pages[j] = mtmp;
					}
				}
				VM_OBJECT_RUNLOCK(obj);

				if ((bp->b_flags & (B_INVAL | B_UNMAPPED)) == 0) {
					BUF_CHECK_MAPPED(bp);
					pmap_qenter(
					    trunc_page((vm_offset_t)bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				m = bp->b_pages[i];
			}
			if ((bp->b_flags & B_NOCACHE) ||
			    (bp->b_ioflags & BIO_ERROR &&
			    bp->b_iocmd == BIO_READ)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
				    (PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				VM_OBJECT_WLOCK(obj);
				while (vm_page_xbusied(m)) {
					vm_page_lock(m);
					VM_OBJECT_WUNLOCK(obj);
					vm_page_busy_sleep(m, "mbncsh");
					VM_OBJECT_WLOCK(obj);
				}
				if (pmap_page_wired_mappings(m) == 0)
					vm_page_set_invalid(m, poffset, presid);
				VM_OBJECT_WUNLOCK(obj);
				if (had_bogus)
					printf("avoided corruption bug in bogus_page/brelse code\n");
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			vfs_vmio_release(bp);
		}

	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
		if (bp->b_bufsize != 0)
			allocbuf(bp, 0);
		if (bp->b_vp != NULL)
			brelvp(bp);
	}

	/*
	 * If the buffer has junk contents signal it and eventually
	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
	 * doesn't find it.
	 */
	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
		bp->b_flags |= B_INVAL;
	if (bp->b_flags & B_INVAL) {
		if (bp->b_flags & B_DELWRI)
			bundirty(bp);
		if (bp->b_vp)
			brelvp(bp);
	}

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 1");
		if (bp->b_kvasize)
			qindex = QUEUE_EMPTYKVA;
		else
			qindex = QUEUE_EMPTY;
		bp->b_flags |= B_AGE;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
	    (bp->b_ioflags & BIO_ERROR)) {
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 2");
		qindex = QUEUE_CLEAN;
		bp->b_flags |= B_AGE;
	/* remaining buffers */
	} else if (bp->b_flags & B_DELWRI)
		qindex = QUEUE_DIRTY;
	else
		qindex = QUEUE_CLEAN;

	binsfree(bp, qindex);

	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("brelse: not dirty");
	/* unlock */
	BUF_UNLOCK(bp);
}

/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.  The buffer is expected to be used again soon.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 *
 * XXX we should be able to leave the B_RELBUF hint set on completion.
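 *
 * Illustrative sketch (editorial example, not part of the original
 * comment): a caller that no longer wants the contents cached drops the
 * buffer through brelse() with a hint set, while a still-useful buffer
 * is requeued with bqrelse():
 *
 *	bp->b_flags |= B_NOCACHE;
 *	brelse(bp);		-- discard cached contents
 * versus
 *	bqrelse(bp);		-- keep contents for reuse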
 */
void
bqrelse(struct buf *bp)
{
	int qindex;

	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (BUF_LOCKRECURSED(bp)) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		return;
	}
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);

	if (bp->b_flags & B_MANAGED) {
		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		goto out;
	}

	/* buffers with stale but valid contents */
	if (bp->b_flags & B_DELWRI) {
		qindex = QUEUE_DIRTY;
	} else {
		if ((bp->b_flags & B_DELWRI) == 0 &&
		    (bp->b_xflags & BX_VNDIRTY))
			panic("bqrelse: not dirty");
		/*
		 * BKGRDINPROG can only be set with the buf and bufobj
		 * locks both held.  We tolerate a race to clear it here.
		 */
		if (buf_vm_page_count_severe() &&
		    (bp->b_vflags & BV_BKGRDINPROG) == 0) {
			/*
			 * We are too low on memory, we have to try to free
			 * the buffer (most importantly: the wired pages
			 * making up its backing store) *now*.
			 */
			brelse(bp);
			return;
		}
		qindex = QUEUE_CLEAN;
	}
	binsfree(bp, qindex);

out:
	/* unlock */
	BUF_UNLOCK(bp);
}

/* Give pages used by the bp back to the VM system (where possible) */
static void
vfs_vmio_release(struct buf *bp)
{
	int i;
	vm_page_t m;

	if ((bp->b_flags & B_UNMAPPED) == 0) {
		BUF_CHECK_MAPPED(bp);
		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
	} else
		BUF_CHECK_UNMAPPED(bp);
	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_lock(m);
		vm_page_unwire(m, 0);

		/*
		 * Might as well free the page if we can and it has
		 * no valid data.  We also free the page if the
		 * buffer was used for direct I/O
		 */
		if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
			if (m->wire_count == 0 && !vm_page_busied(m))
				vm_page_free(m);
		} else if (bp->b_flags & B_DIRECT)
			vm_page_try_to_free(m);
		else if (buf_vm_page_count_severe())
			vm_page_try_to_cache(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);

	if (bp->b_bufsize) {
		bufspacewakeup();
		bp->b_bufsize = 0;
	}
	bp->b_npages = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block at a particular lbn is available for a clustered
 * write.
1890 */ 1891 static int 1892 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno) 1893 { 1894 struct buf *bpa; 1895 int match; 1896 1897 match = 0; 1898 1899 /* If the buf isn't in core skip it */ 1900 if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL) 1901 return (0); 1902 1903 /* If the buf is busy we don't want to wait for it */ 1904 if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 1905 return (0); 1906 1907 /* Only cluster with valid clusterable delayed write buffers */ 1908 if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) != 1909 (B_DELWRI | B_CLUSTEROK)) 1910 goto done; 1911 1912 if (bpa->b_bufsize != size) 1913 goto done; 1914 1915 /* 1916 * Check to see if it is in the expected place on disk and that the 1917 * block has been mapped. 1918 */ 1919 if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno)) 1920 match = 1; 1921 done: 1922 BUF_UNLOCK(bpa); 1923 return (match); 1924 } 1925 1926 /* 1927 * vfs_bio_awrite: 1928 * 1929 * Implement clustered async writes for clearing out B_DELWRI buffers. 1930 * This is much better than the old way of writing only one buffer at 1931 * a time. Note that we may not be presented with the buffers in the 1932 * correct order, so we search for the cluster in both directions. 1933 */ 1934 int 1935 vfs_bio_awrite(struct buf *bp) 1936 { 1937 struct bufobj *bo; 1938 int i; 1939 int j; 1940 daddr_t lblkno = bp->b_lblkno; 1941 struct vnode *vp = bp->b_vp; 1942 int ncl; 1943 int nwritten; 1944 int size; 1945 int maxcl; 1946 int gbflags; 1947 1948 bo = &vp->v_bufobj; 1949 gbflags = (bp->b_flags & B_UNMAPPED) != 0 ? GB_UNMAPPED : 0; 1950 /* 1951 * right now we support clustered writing only to regular files. If 1952 * we find a clusterable block we could be in the middle of a cluster 1953 * rather than at the beginning. 1954 */ 1955 if ((vp->v_type == VREG) && 1956 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 1957 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 1958 1959 size = vp->v_mount->mnt_stat.f_iosize; 1960 maxcl = MAXPHYS / size; 1961 1962 BO_RLOCK(bo); 1963 for (i = 1; i < maxcl; i++) 1964 if (vfs_bio_clcheck(vp, size, lblkno + i, 1965 bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0) 1966 break; 1967 1968 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 1969 if (vfs_bio_clcheck(vp, size, lblkno - j, 1970 bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0) 1971 break; 1972 BO_RUNLOCK(bo); 1973 --j; 1974 ncl = i + j; 1975 /* 1976 * this is a possible cluster write 1977 */ 1978 if (ncl != 1) { 1979 BUF_UNLOCK(bp); 1980 nwritten = cluster_wbuild(vp, size, lblkno - j, ncl, 1981 gbflags); 1982 return (nwritten); 1983 } 1984 } 1985 bremfree(bp); 1986 bp->b_flags |= B_ASYNC; 1987 /* 1988 * default (old) behavior, writing out only one block 1989 * 1990 * XXX returns b_bufsize instead of b_bcount for nwritten?
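 *
 * For the clustered path above, a rough sense of scale (assuming the
 * common 128kB MAXPHYS): with a 16kB filesystem block size,
 * maxcl = MAXPHYS / size = 8, so at most eight contiguous delayed-write
 * blocks are handed to cluster_wbuild() in one call.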
1991 */ 1992 nwritten = bp->b_bufsize; 1993 (void) bwrite(bp); 1994 1995 return (nwritten); 1996 } 1997 1998 static void 1999 setbufkva(struct buf *bp, vm_offset_t addr, int maxsize, int gbflags) 2000 { 2001 2002 KASSERT((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 && 2003 bp->b_kvasize == 0, ("call bfreekva(%p)", bp)); 2004 if ((gbflags & GB_UNMAPPED) == 0) { 2005 bp->b_kvabase = (caddr_t)addr; 2006 } else if ((gbflags & GB_KVAALLOC) != 0) { 2007 KASSERT((gbflags & GB_UNMAPPED) != 0, 2008 ("GB_KVAALLOC without GB_UNMAPPED")); 2009 bp->b_kvaalloc = (caddr_t)addr; 2010 bp->b_flags |= B_UNMAPPED | B_KVAALLOC; 2011 atomic_add_long(&unmapped_bufspace, bp->b_kvasize); 2012 } 2013 bp->b_kvasize = maxsize; 2014 } 2015 2016 /* 2017 * Allocate the buffer KVA and set b_kvasize. Also set b_kvabase if 2018 * needed. 2019 */ 2020 static int 2021 allocbufkva(struct buf *bp, int maxsize, int gbflags) 2022 { 2023 vm_offset_t addr; 2024 2025 bfreekva(bp); 2026 addr = 0; 2027 2028 if (vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr)) { 2029 /* 2030 * Buffer map is too fragmented. Request the caller 2031 * to defragment the map. 2032 */ 2033 atomic_add_int(&bufdefragcnt, 1); 2034 return (1); 2035 } 2036 setbufkva(bp, addr, maxsize, gbflags); 2037 atomic_add_long(&bufspace, bp->b_kvasize); 2038 return (0); 2039 } 2040 2041 /* 2042 * Ask the bufdaemon for help, or act as bufdaemon itself, when a 2043 * locked vnode is supplied. 2044 */ 2045 static void 2046 getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo, 2047 int defrag) 2048 { 2049 struct thread *td; 2050 char *waitmsg; 2051 int fl, flags, norunbuf; 2052 2053 mtx_assert(&bqclean, MA_OWNED); 2054 2055 if (defrag) { 2056 flags = VFS_BIO_NEED_BUFSPACE; 2057 waitmsg = "nbufkv"; 2058 } else if (bufspace >= hibufspace) { 2059 waitmsg = "nbufbs"; 2060 flags = VFS_BIO_NEED_BUFSPACE; 2061 } else { 2062 waitmsg = "newbuf"; 2063 flags = VFS_BIO_NEED_ANY; 2064 } 2065 mtx_lock(&nblock); 2066 needsbuffer |= flags; 2067 mtx_unlock(&nblock); 2068 mtx_unlock(&bqclean); 2069 2070 bd_speedup(); /* heeeelp */ 2071 if ((gbflags & GB_NOWAIT_BD) != 0) 2072 return; 2073 2074 td = curthread; 2075 mtx_lock(&nblock); 2076 while (needsbuffer & flags) { 2077 if (vp != NULL && (td->td_pflags & TDP_BUFNEED) == 0) { 2078 mtx_unlock(&nblock); 2079 /* 2080 * getblk() is called with a vnode locked, and 2081 * some majority of the dirty buffers may as 2082 * well belong to the vnode. Flushing the 2083 * buffers there would make a progress that 2084 * cannot be achieved by the buf_daemon, that 2085 * cannot lock the vnode. 2086 */ 2087 norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) | 2088 (td->td_pflags & TDP_NORUNNINGBUF); 2089 /* play bufdaemon */ 2090 td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF; 2091 fl = buf_flush(vp, flushbufqtarget); 2092 td->td_pflags &= norunbuf; 2093 mtx_lock(&nblock); 2094 if (fl != 0) 2095 continue; 2096 if ((needsbuffer & flags) == 0) 2097 break; 2098 } 2099 if (msleep(&needsbuffer, &nblock, (PRIBIO + 4) | slpflag, 2100 waitmsg, slptimeo)) 2101 break; 2102 } 2103 mtx_unlock(&nblock); 2104 } 2105 2106 static void 2107 getnewbuf_reuse_bp(struct buf *bp, int qindex) 2108 { 2109 2110 CTR6(KTR_BUF, "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d " 2111 "queue %d (recycling)", bp, bp->b_vp, bp->b_flags, 2112 bp->b_kvasize, bp->b_bufsize, qindex); 2113 mtx_assert(&bqclean, MA_NOTOWNED); 2114 2115 /* 2116 * Note: we no longer distinguish between VMIO and non-VMIO 2117 * buffers. 
2118 */ 2119 KASSERT((bp->b_flags & B_DELWRI) == 0, 2120 ("delwri buffer %p found in queue %d", bp, qindex)); 2121 2122 if (qindex == QUEUE_CLEAN) { 2123 if (bp->b_flags & B_VMIO) { 2124 bp->b_flags &= ~B_ASYNC; 2125 vfs_vmio_release(bp); 2126 } 2127 if (bp->b_vp != NULL) 2128 brelvp(bp); 2129 } 2130 2131 /* 2132 * Get the rest of the buffer freed up. b_kva* is still valid 2133 * after this operation. 2134 */ 2135 2136 if (bp->b_rcred != NOCRED) { 2137 crfree(bp->b_rcred); 2138 bp->b_rcred = NOCRED; 2139 } 2140 if (bp->b_wcred != NOCRED) { 2141 crfree(bp->b_wcred); 2142 bp->b_wcred = NOCRED; 2143 } 2144 if (!LIST_EMPTY(&bp->b_dep)) 2145 buf_deallocate(bp); 2146 if (bp->b_vflags & BV_BKGRDINPROG) 2147 panic("losing buffer 3"); 2148 KASSERT(bp->b_vp == NULL, ("bp: %p still has vnode %p. qindex: %d", 2149 bp, bp->b_vp, qindex)); 2150 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0, 2151 ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags)); 2152 2153 if (bp->b_bufsize) 2154 allocbuf(bp, 0); 2155 2156 bp->b_flags &= B_UNMAPPED | B_KVAALLOC; 2157 bp->b_ioflags = 0; 2158 bp->b_xflags = 0; 2159 KASSERT((bp->b_flags & B_INFREECNT) == 0, 2160 ("buf %p still counted as free?", bp)); 2161 bp->b_vflags = 0; 2162 bp->b_vp = NULL; 2163 bp->b_blkno = bp->b_lblkno = 0; 2164 bp->b_offset = NOOFFSET; 2165 bp->b_iodone = 0; 2166 bp->b_error = 0; 2167 bp->b_resid = 0; 2168 bp->b_bcount = 0; 2169 bp->b_npages = 0; 2170 bp->b_dirtyoff = bp->b_dirtyend = 0; 2171 bp->b_bufobj = NULL; 2172 bp->b_pin_count = 0; 2173 bp->b_fsprivate1 = NULL; 2174 bp->b_fsprivate2 = NULL; 2175 bp->b_fsprivate3 = NULL; 2176 2177 LIST_INIT(&bp->b_dep); 2178 } 2179 2180 static int flushingbufs; 2181 2182 static struct buf * 2183 getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata) 2184 { 2185 struct buf *bp, *nbp; 2186 int nqindex, qindex, pass; 2187 2188 KASSERT(!unmapped || !defrag, ("both unmapped and defrag")); 2189 2190 pass = 1; 2191 restart: 2192 atomic_add_int(&getnewbufrestarts, 1); 2193 2194 /* 2195 * Setup for scan. If we do not have enough free buffers, 2196 * we setup a degenerate case that immediately fails. Note 2197 * that if we are specially marked process, we are allowed to 2198 * dip into our reserves. 2199 * 2200 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN 2201 * for the allocation of the mapped buffer. For unmapped, the 2202 * easiest is to start with EMPTY outright. 2203 * 2204 * We start with EMPTYKVA. If the list is empty we backup to EMPTY. 2205 * However, there are a number of cases (defragging, reusing, ...) 2206 * where we cannot backup. 2207 */ 2208 nbp = NULL; 2209 mtx_lock(&bqclean); 2210 if (!defrag && unmapped) { 2211 nqindex = QUEUE_EMPTY; 2212 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 2213 } 2214 if (nbp == NULL) { 2215 nqindex = QUEUE_EMPTYKVA; 2216 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]); 2217 } 2218 2219 /* 2220 * If no EMPTYKVA buffers and we are either defragging or 2221 * reusing, locate a CLEAN buffer to free or reuse. If 2222 * bufspace useage is low skip this step so we can allocate a 2223 * new buffer. 2224 */ 2225 if (nbp == NULL && (defrag || bufspace >= lobufspace)) { 2226 nqindex = QUEUE_CLEAN; 2227 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2228 } 2229 2230 /* 2231 * If we could not find or were not allowed to reuse a CLEAN 2232 * buffer, check to see if it is ok to use an EMPTY buffer. 2233 * We can only use an EMPTY buffer if allocating its KVA would 2234 * not otherwise run us out of buffer space. 
No KVA is needed 2235 * for the unmapped allocation. 2236 */ 2237 if (nbp == NULL && defrag == 0 && (bufspace + maxsize < hibufspace || 2238 metadata)) { 2239 nqindex = QUEUE_EMPTY; 2240 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 2241 } 2242 2243 /* 2244 * All available buffers might be clean, retry ignoring the 2245 * lobufspace as the last resort. 2246 */ 2247 if (nbp == NULL && !TAILQ_EMPTY(&bufqueues[QUEUE_CLEAN])) { 2248 nqindex = QUEUE_CLEAN; 2249 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2250 } 2251 2252 /* 2253 * Run scan, possibly freeing data and/or kva mappings on the fly 2254 * depending. 2255 */ 2256 while ((bp = nbp) != NULL) { 2257 qindex = nqindex; 2258 2259 /* 2260 * Calculate next bp (we can only use it if we do not 2261 * block or do other fancy things). 2262 */ 2263 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) { 2264 switch (qindex) { 2265 case QUEUE_EMPTY: 2266 nqindex = QUEUE_EMPTYKVA; 2267 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]); 2268 if (nbp != NULL) 2269 break; 2270 /* FALLTHROUGH */ 2271 case QUEUE_EMPTYKVA: 2272 nqindex = QUEUE_CLEAN; 2273 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2274 if (nbp != NULL) 2275 break; 2276 /* FALLTHROUGH */ 2277 case QUEUE_CLEAN: 2278 if (metadata && pass == 1) { 2279 pass = 2; 2280 nqindex = QUEUE_EMPTY; 2281 nbp = TAILQ_FIRST( 2282 &bufqueues[QUEUE_EMPTY]); 2283 } 2284 /* 2285 * nbp is NULL. 2286 */ 2287 break; 2288 } 2289 } 2290 /* 2291 * If we are defragging then we need a buffer with 2292 * b_kvasize != 0. XXX this situation should no longer 2293 * occur, if defrag is non-zero the buffer's b_kvasize 2294 * should also be non-zero at this point. XXX 2295 */ 2296 if (defrag && bp->b_kvasize == 0) { 2297 printf("Warning: defrag empty buffer %p\n", bp); 2298 continue; 2299 } 2300 2301 /* 2302 * Start freeing the bp. This is somewhat involved. nbp 2303 * remains valid only for QUEUE_EMPTY[KVA] bp's. 2304 */ 2305 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 2306 continue; 2307 /* 2308 * BKGRDINPROG can only be set with the buf and bufobj 2309 * locks both held. We tolerate a race to clear it here. 2310 */ 2311 if (bp->b_vflags & BV_BKGRDINPROG) { 2312 BUF_UNLOCK(bp); 2313 continue; 2314 } 2315 2316 KASSERT(bp->b_qindex == qindex, 2317 ("getnewbuf: inconsistent queue %d bp %p", qindex, bp)); 2318 2319 bremfreel(bp); 2320 mtx_unlock(&bqclean); 2321 /* 2322 * NOTE: nbp is now entirely invalid. We can only restart 2323 * the scan from this point on. 2324 */ 2325 2326 getnewbuf_reuse_bp(bp, qindex); 2327 mtx_assert(&bqclean, MA_NOTOWNED); 2328 2329 /* 2330 * If we are defragging then free the buffer. 2331 */ 2332 if (defrag) { 2333 bp->b_flags |= B_INVAL; 2334 bfreekva(bp); 2335 brelse(bp); 2336 defrag = 0; 2337 goto restart; 2338 } 2339 2340 /* 2341 * Notify any waiters for the buffer lock about 2342 * identity change by freeing the buffer. 2343 */ 2344 if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) { 2345 bp->b_flags |= B_INVAL; 2346 bfreekva(bp); 2347 brelse(bp); 2348 goto restart; 2349 } 2350 2351 if (metadata) 2352 break; 2353 2354 /* 2355 * If we are overcomitted then recover the buffer and its 2356 * KVM space. This occurs in rare situations when multiple 2357 * processes are blocked in getnewbuf() or allocbuf(). 
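 *
 * The recovery below is a simple hysteresis: once bufspace reaches
 * hibufspace, flushingbufs is set and every reused buffer that still
 * owns KVA is invalidated (bfreekva() followed by brelse()) until
 * bufspace drops back under lobufspace, at which point flushingbufs is
 * cleared again.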
2358 */ 2359 if (bufspace >= hibufspace) 2360 flushingbufs = 1; 2361 if (flushingbufs && bp->b_kvasize != 0) { 2362 bp->b_flags |= B_INVAL; 2363 bfreekva(bp); 2364 brelse(bp); 2365 goto restart; 2366 } 2367 if (bufspace < lobufspace) 2368 flushingbufs = 0; 2369 break; 2370 } 2371 return (bp); 2372 } 2373 2374 /* 2375 * getnewbuf: 2376 * 2377 * Find and initialize a new buffer header, freeing up existing buffers 2378 * in the bufqueues as necessary. The new buffer is returned locked. 2379 * 2380 * Important: B_INVAL is not set. If the caller wishes to throw the 2381 * buffer away, the caller must set B_INVAL prior to calling brelse(). 2382 * 2383 * We block if: 2384 * We have insufficient buffer headers 2385 * We have insufficient buffer space 2386 * buffer_arena is too fragmented ( space reservation fails ) 2387 * If we have to flush dirty buffers ( but we try to avoid this ) 2388 */ 2389 static struct buf * 2390 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize, 2391 int gbflags) 2392 { 2393 struct buf *bp; 2394 int defrag, metadata; 2395 2396 KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC, 2397 ("GB_KVAALLOC only makes sense with GB_UNMAPPED")); 2398 if (!unmapped_buf_allowed) 2399 gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC); 2400 2401 defrag = 0; 2402 if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 || 2403 vp->v_type == VCHR) 2404 metadata = 1; 2405 else 2406 metadata = 0; 2407 /* 2408 * We can't afford to block since we might be holding a vnode lock, 2409 * which may prevent system daemons from running. We deal with 2410 * low-memory situations by proactively returning memory and running 2411 * async I/O rather then sync I/O. 2412 */ 2413 atomic_add_int(&getnewbufcalls, 1); 2414 atomic_subtract_int(&getnewbufrestarts, 1); 2415 restart: 2416 bp = getnewbuf_scan(maxsize, defrag, (gbflags & (GB_UNMAPPED | 2417 GB_KVAALLOC)) == GB_UNMAPPED, metadata); 2418 if (bp != NULL) 2419 defrag = 0; 2420 2421 /* 2422 * If we exhausted our list, sleep as appropriate. We may have to 2423 * wakeup various daemons and write out some dirty buffers. 2424 * 2425 * Generally we are sleeping due to insufficient buffer space. 2426 */ 2427 if (bp == NULL) { 2428 mtx_assert(&bqclean, MA_OWNED); 2429 getnewbuf_bufd_help(vp, gbflags, slpflag, slptimeo, defrag); 2430 mtx_assert(&bqclean, MA_NOTOWNED); 2431 } else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) { 2432 mtx_assert(&bqclean, MA_NOTOWNED); 2433 2434 bfreekva(bp); 2435 bp->b_flags |= B_UNMAPPED; 2436 bp->b_kvabase = bp->b_data = unmapped_buf; 2437 bp->b_kvasize = maxsize; 2438 atomic_add_long(&bufspace, bp->b_kvasize); 2439 atomic_add_long(&unmapped_bufspace, bp->b_kvasize); 2440 atomic_add_int(&bufreusecnt, 1); 2441 } else { 2442 mtx_assert(&bqclean, MA_NOTOWNED); 2443 2444 /* 2445 * We finally have a valid bp. We aren't quite out of the 2446 * woods, we still have to reserve kva space. In order 2447 * to keep fragmentation sane we only allocate kva in 2448 * BKVASIZE chunks. 
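 *
 * The rounding below is plain power-of-two alignment.  For example,
 * assuming the usual 16kB BKVASIZE, a 9kB request becomes
 *
 *        maxsize = (9216 + BKVAMASK) & ~BKVAMASK = 16384
 *
 * so every KVA allocation is a multiple of BKVASIZE and the buffer map
 * does not fragment into odd-sized holes.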
2449 */ 2450 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK; 2451 2452 if (maxsize != bp->b_kvasize || (bp->b_flags & (B_UNMAPPED | 2453 B_KVAALLOC)) == B_UNMAPPED) { 2454 if (allocbufkva(bp, maxsize, gbflags)) { 2455 defrag = 1; 2456 bp->b_flags |= B_INVAL; 2457 brelse(bp); 2458 goto restart; 2459 } 2460 atomic_add_int(&bufreusecnt, 1); 2461 } else if ((bp->b_flags & B_KVAALLOC) != 0 && 2462 (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == 0) { 2463 /* 2464 * If the reused buffer has KVA allocated, 2465 * reassign b_kvaalloc to b_kvabase. 2466 */ 2467 bp->b_kvabase = bp->b_kvaalloc; 2468 bp->b_flags &= ~B_KVAALLOC; 2469 atomic_subtract_long(&unmapped_bufspace, 2470 bp->b_kvasize); 2471 atomic_add_int(&bufreusecnt, 1); 2472 } else if ((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 && 2473 (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == (GB_UNMAPPED | 2474 GB_KVAALLOC)) { 2475 /* 2476 * The case of reused buffer already have KVA 2477 * mapped, but the request is for unmapped 2478 * buffer with KVA allocated. 2479 */ 2480 bp->b_kvaalloc = bp->b_kvabase; 2481 bp->b_data = bp->b_kvabase = unmapped_buf; 2482 bp->b_flags |= B_UNMAPPED | B_KVAALLOC; 2483 atomic_add_long(&unmapped_bufspace, 2484 bp->b_kvasize); 2485 atomic_add_int(&bufreusecnt, 1); 2486 } 2487 if ((gbflags & GB_UNMAPPED) == 0) { 2488 bp->b_saveaddr = bp->b_kvabase; 2489 bp->b_data = bp->b_saveaddr; 2490 bp->b_flags &= ~B_UNMAPPED; 2491 BUF_CHECK_MAPPED(bp); 2492 } 2493 } 2494 return (bp); 2495 } 2496 2497 /* 2498 * buf_daemon: 2499 * 2500 * buffer flushing daemon. Buffers are normally flushed by the 2501 * update daemon but if it cannot keep up this process starts to 2502 * take the load in an attempt to prevent getnewbuf() from blocking. 2503 */ 2504 2505 static struct kproc_desc buf_kp = { 2506 "bufdaemon", 2507 buf_daemon, 2508 &bufdaemonproc 2509 }; 2510 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp); 2511 2512 static int 2513 buf_flush(struct vnode *vp, int target) 2514 { 2515 int flushed; 2516 2517 flushed = flushbufqueues(vp, target, 0); 2518 if (flushed == 0) { 2519 /* 2520 * Could not find any buffers without rollback 2521 * dependencies, so just write the first one 2522 * in the hopes of eventually making progress. 2523 */ 2524 if (vp != NULL && target > 2) 2525 target /= 2; 2526 flushbufqueues(vp, target, 1); 2527 } 2528 return (flushed); 2529 } 2530 2531 static void 2532 buf_daemon() 2533 { 2534 int lodirty; 2535 2536 /* 2537 * This process needs to be suspended prior to shutdown sync. 2538 */ 2539 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc, 2540 SHUTDOWN_PRI_LAST); 2541 2542 /* 2543 * This process is allowed to take the buffer cache to the limit 2544 */ 2545 curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED; 2546 mtx_lock(&bdlock); 2547 for (;;) { 2548 bd_request = 0; 2549 mtx_unlock(&bdlock); 2550 2551 kproc_suspend_check(bufdaemonproc); 2552 lodirty = lodirtybuffers; 2553 if (bd_speedupreq) { 2554 lodirty = numdirtybuffers / 2; 2555 bd_speedupreq = 0; 2556 } 2557 /* 2558 * Do the flush. Limit the amount of in-transit I/O we 2559 * allow to build up, otherwise we would completely saturate 2560 * the I/O system. 2561 */ 2562 while (numdirtybuffers > lodirty) { 2563 if (buf_flush(NULL, numdirtybuffers - lodirty) == 0) 2564 break; 2565 kern_yield(PRI_USER); 2566 } 2567 2568 /* 2569 * Only clear bd_request if we have reached our low water 2570 * mark. 
The buf_daemon normally waits 1 second and 2571 * then incrementally flushes any dirty buffers that have 2572 * built up, within reason. 2573 * 2574 * If we were unable to hit our low water mark and couldn't 2575 * find any flushable buffers, we sleep for a short period 2576 * to avoid endless loops on unlockable buffers. 2577 */ 2578 mtx_lock(&bdlock); 2579 if (numdirtybuffers <= lodirtybuffers) { 2580 /* 2581 * We reached our low water mark, reset the 2582 * request and sleep until we are needed again. 2583 * The sleep is just so the suspend code works. 2584 */ 2585 bd_request = 0; 2586 /* 2587 * Do an extra wakeup in case dirty threshold 2588 * changed via sysctl and the explicit transition 2589 * out of shortfall was missed. 2590 */ 2591 bdirtywakeup(); 2592 if (runningbufspace <= lorunningspace) 2593 runningwakeup(); 2594 msleep(&bd_request, &bdlock, PVM, "psleep", hz); 2595 } else { 2596 /* 2597 * We couldn't find any flushable dirty buffers but 2598 * still have too many dirty buffers, we 2599 * have to sleep and try again. (rare) 2600 */ 2601 msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10); 2602 } 2603 } 2604 } 2605 2606 /* 2607 * flushbufqueues: 2608 * 2609 * Try to flush a buffer in the dirty queue. We must be careful to 2610 * free up B_INVAL buffers instead of write them, which NFS is 2611 * particularly sensitive to. 2612 */ 2613 static int flushwithdeps = 0; 2614 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps, 2615 0, "Number of buffers flushed with dependecies that require rollbacks"); 2616 2617 static int 2618 flushbufqueues(struct vnode *lvp, int target, int flushdeps) 2619 { 2620 struct buf *sentinel; 2621 struct vnode *vp; 2622 struct mount *mp; 2623 struct buf *bp; 2624 int hasdeps; 2625 int flushed; 2626 int queue; 2627 2628 flushed = 0; 2629 queue = QUEUE_DIRTY; 2630 bp = NULL; 2631 sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO); 2632 sentinel->b_qindex = QUEUE_SENTINEL; 2633 mtx_lock(&bqdirty); 2634 TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist); 2635 while (flushed != target) { 2636 bp = TAILQ_NEXT(sentinel, b_freelist); 2637 if (bp != NULL) { 2638 TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist); 2639 TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel, 2640 b_freelist); 2641 } else 2642 break; 2643 /* 2644 * Skip sentinels inserted by other invocations of the 2645 * flushbufqueues(), taking care to not reorder them. 2646 */ 2647 if (bp->b_qindex == QUEUE_SENTINEL) 2648 continue; 2649 /* 2650 * Only flush the buffers that belong to the 2651 * vnode locked by the curthread. 2652 */ 2653 if (lvp != NULL && bp->b_vp != lvp) 2654 continue; 2655 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 2656 continue; 2657 if (bp->b_pin_count > 0) { 2658 BUF_UNLOCK(bp); 2659 continue; 2660 } 2661 /* 2662 * BKGRDINPROG can only be set with the buf and bufobj 2663 * locks both held. We tolerate a race to clear it here. 2664 */ 2665 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 || 2666 (bp->b_flags & B_DELWRI) == 0) { 2667 BUF_UNLOCK(bp); 2668 continue; 2669 } 2670 if (bp->b_flags & B_INVAL) { 2671 bremfreel(bp); 2672 mtx_unlock(&bqdirty); 2673 brelse(bp); 2674 flushed++; 2675 mtx_lock(&bqdirty); 2676 continue; 2677 } 2678 2679 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) { 2680 if (flushdeps == 0) { 2681 BUF_UNLOCK(bp); 2682 continue; 2683 } 2684 hasdeps = 1; 2685 } else 2686 hasdeps = 0; 2687 /* 2688 * We must hold the lock on a vnode before writing 2689 * one of its buffers. 
Otherwise we may confuse, or 2690 * in the case of a snapshot vnode, deadlock the 2691 * system. 2692 * 2693 * The lock order here is the reverse of the normal 2694 * of vnode followed by buf lock. This is ok because 2695 * the NOWAIT will prevent deadlock. 2696 */ 2697 vp = bp->b_vp; 2698 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2699 BUF_UNLOCK(bp); 2700 continue; 2701 } 2702 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_CANRECURSE) == 0) { 2703 mtx_unlock(&bqdirty); 2704 CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X", 2705 bp, bp->b_vp, bp->b_flags); 2706 if (curproc == bufdaemonproc) 2707 vfs_bio_awrite(bp); 2708 else { 2709 bremfree(bp); 2710 bwrite(bp); 2711 notbufdflushes++; 2712 } 2713 vn_finished_write(mp); 2714 VOP_UNLOCK(vp, 0); 2715 flushwithdeps += hasdeps; 2716 flushed++; 2717 2718 /* 2719 * Sleeping on runningbufspace while holding 2720 * vnode lock leads to deadlock. 2721 */ 2722 if (curproc == bufdaemonproc && 2723 runningbufspace > hirunningspace) 2724 waitrunningbufspace(); 2725 mtx_lock(&bqdirty); 2726 continue; 2727 } 2728 vn_finished_write(mp); 2729 BUF_UNLOCK(bp); 2730 } 2731 TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist); 2732 mtx_unlock(&bqdirty); 2733 free(sentinel, M_TEMP); 2734 return (flushed); 2735 } 2736 2737 /* 2738 * Check to see if a block is currently memory resident. 2739 */ 2740 struct buf * 2741 incore(struct bufobj *bo, daddr_t blkno) 2742 { 2743 struct buf *bp; 2744 2745 BO_RLOCK(bo); 2746 bp = gbincore(bo, blkno); 2747 BO_RUNLOCK(bo); 2748 return (bp); 2749 } 2750 2751 /* 2752 * Returns true if no I/O is needed to access the 2753 * associated VM object. This is like incore except 2754 * it also hunts around in the VM system for the data. 2755 */ 2756 2757 static int 2758 inmem(struct vnode * vp, daddr_t blkno) 2759 { 2760 vm_object_t obj; 2761 vm_offset_t toff, tinc, size; 2762 vm_page_t m; 2763 vm_ooffset_t off; 2764 2765 ASSERT_VOP_LOCKED(vp, "inmem"); 2766 2767 if (incore(&vp->v_bufobj, blkno)) 2768 return 1; 2769 if (vp->v_mount == NULL) 2770 return 0; 2771 obj = vp->v_object; 2772 if (obj == NULL) 2773 return (0); 2774 2775 size = PAGE_SIZE; 2776 if (size > vp->v_mount->mnt_stat.f_iosize) 2777 size = vp->v_mount->mnt_stat.f_iosize; 2778 off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize; 2779 2780 VM_OBJECT_RLOCK(obj); 2781 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 2782 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 2783 if (!m) 2784 goto notinmem; 2785 tinc = size; 2786 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK)) 2787 tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK); 2788 if (vm_page_is_valid(m, 2789 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0) 2790 goto notinmem; 2791 } 2792 VM_OBJECT_RUNLOCK(obj); 2793 return 1; 2794 2795 notinmem: 2796 VM_OBJECT_RUNLOCK(obj); 2797 return (0); 2798 } 2799 2800 /* 2801 * Set the dirty range for a buffer based on the status of the dirty 2802 * bits in the pages comprising the buffer. The range is limited 2803 * to the size of the buffer. 2804 * 2805 * Tell the VM system that the pages associated with this buffer 2806 * are clean. This is used for delayed writes where the data is 2807 * going to go to disk eventually without additional VM intevention. 2808 * 2809 * Note that while we only really need to clean through to b_bcount, we 2810 * just go ahead and clean through to b_bufsize. 
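 *
 * The expected caller is the delayed-write path (typically bdwrite());
 * the buffer keeps B_DELWRI while the page dirty bits are folded into
 * b_dirtyoff/b_dirtyend here, so the VM system stops seeing the pages
 * as dirty even though the buffer still owes the write.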
2811 */ 2812 static void 2813 vfs_clean_pages_dirty_buf(struct buf *bp) 2814 { 2815 vm_ooffset_t foff, noff, eoff; 2816 vm_page_t m; 2817 int i; 2818 2819 if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0) 2820 return; 2821 2822 foff = bp->b_offset; 2823 KASSERT(bp->b_offset != NOOFFSET, 2824 ("vfs_clean_pages_dirty_buf: no buffer offset")); 2825 2826 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 2827 vfs_drain_busy_pages(bp); 2828 vfs_setdirty_locked_object(bp); 2829 for (i = 0; i < bp->b_npages; i++) { 2830 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 2831 eoff = noff; 2832 if (eoff > bp->b_offset + bp->b_bufsize) 2833 eoff = bp->b_offset + bp->b_bufsize; 2834 m = bp->b_pages[i]; 2835 vfs_page_set_validclean(bp, foff, m); 2836 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */ 2837 foff = noff; 2838 } 2839 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 2840 } 2841 2842 static void 2843 vfs_setdirty_locked_object(struct buf *bp) 2844 { 2845 vm_object_t object; 2846 int i; 2847 2848 object = bp->b_bufobj->bo_object; 2849 VM_OBJECT_ASSERT_WLOCKED(object); 2850 2851 /* 2852 * We qualify the scan for modified pages on whether the 2853 * object has been flushed yet. 2854 */ 2855 if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) { 2856 vm_offset_t boffset; 2857 vm_offset_t eoffset; 2858 2859 /* 2860 * test the pages to see if they have been modified directly 2861 * by users through the VM system. 2862 */ 2863 for (i = 0; i < bp->b_npages; i++) 2864 vm_page_test_dirty(bp->b_pages[i]); 2865 2866 /* 2867 * Calculate the encompassing dirty range, boffset and eoffset, 2868 * (eoffset - boffset) bytes. 2869 */ 2870 2871 for (i = 0; i < bp->b_npages; i++) { 2872 if (bp->b_pages[i]->dirty) 2873 break; 2874 } 2875 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2876 2877 for (i = bp->b_npages - 1; i >= 0; --i) { 2878 if (bp->b_pages[i]->dirty) { 2879 break; 2880 } 2881 } 2882 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2883 2884 /* 2885 * Fit it to the buffer. 2886 */ 2887 2888 if (eoffset > bp->b_bcount) 2889 eoffset = bp->b_bcount; 2890 2891 /* 2892 * If we have a good dirty range, merge with the existing 2893 * dirty range. 2894 */ 2895 2896 if (boffset < eoffset) { 2897 if (bp->b_dirtyoff > boffset) 2898 bp->b_dirtyoff = boffset; 2899 if (bp->b_dirtyend < eoffset) 2900 bp->b_dirtyend = eoffset; 2901 } 2902 } 2903 } 2904 2905 /* 2906 * Allocate the KVA mapping for an existing buffer. It handles the 2907 * cases of both B_UNMAPPED buffer, and buffer with the preallocated 2908 * KVA which is not mapped (B_KVAALLOC). 2909 */ 2910 static void 2911 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags) 2912 { 2913 struct buf *scratch_bp; 2914 int bsize, maxsize, need_mapping, need_kva; 2915 off_t offset; 2916 2917 need_mapping = (bp->b_flags & B_UNMAPPED) != 0 && 2918 (gbflags & GB_UNMAPPED) == 0; 2919 need_kva = (bp->b_flags & (B_KVAALLOC | B_UNMAPPED)) == B_UNMAPPED && 2920 (gbflags & GB_KVAALLOC) != 0; 2921 if (!need_mapping && !need_kva) 2922 return; 2923 2924 BUF_CHECK_UNMAPPED(bp); 2925 2926 if (need_mapping && (bp->b_flags & B_KVAALLOC) != 0) { 2927 /* 2928 * Buffer is not mapped, but the KVA was already 2929 * reserved at the time of the instantiation. Use the 2930 * allocated space. 
2931 */ 2932 bp->b_flags &= ~B_KVAALLOC; 2933 KASSERT(bp->b_kvaalloc != 0, ("kvaalloc == 0")); 2934 bp->b_kvabase = bp->b_kvaalloc; 2935 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize); 2936 goto has_addr; 2937 } 2938 2939 /* 2940 * Calculate the amount of the address space we would reserve 2941 * if the buffer was mapped. 2942 */ 2943 bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize; 2944 offset = blkno * bsize; 2945 maxsize = size + (offset & PAGE_MASK); 2946 maxsize = imax(maxsize, bsize); 2947 2948 mapping_loop: 2949 if (allocbufkva(bp, maxsize, gbflags)) { 2950 /* 2951 * Request defragmentation. getnewbuf() returns us the 2952 * allocated space by the scratch buffer KVA. 2953 */ 2954 scratch_bp = getnewbuf(bp->b_vp, 0, 0, size, maxsize, gbflags | 2955 (GB_UNMAPPED | GB_KVAALLOC)); 2956 if (scratch_bp == NULL) { 2957 if ((gbflags & GB_NOWAIT_BD) != 0) { 2958 /* 2959 * XXXKIB: defragmentation cannot 2960 * succeed, not sure what else to do. 2961 */ 2962 panic("GB_NOWAIT_BD and B_UNMAPPED %p", bp); 2963 } 2964 atomic_add_int(&mappingrestarts, 1); 2965 goto mapping_loop; 2966 } 2967 KASSERT((scratch_bp->b_flags & B_KVAALLOC) != 0, 2968 ("scratch bp !B_KVAALLOC %p", scratch_bp)); 2969 setbufkva(bp, (vm_offset_t)scratch_bp->b_kvaalloc, 2970 scratch_bp->b_kvasize, gbflags); 2971 2972 /* Get rid of the scratch buffer. */ 2973 scratch_bp->b_kvasize = 0; 2974 scratch_bp->b_flags |= B_INVAL; 2975 scratch_bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC); 2976 brelse(scratch_bp); 2977 } 2978 if (!need_mapping) 2979 return; 2980 2981 has_addr: 2982 bp->b_saveaddr = bp->b_kvabase; 2983 bp->b_data = bp->b_saveaddr; /* b_offset is handled by bpmap_qenter */ 2984 bp->b_flags &= ~B_UNMAPPED; 2985 BUF_CHECK_MAPPED(bp); 2986 bpmap_qenter(bp); 2987 } 2988 2989 /* 2990 * getblk: 2991 * 2992 * Get a block given a specified block and offset into a file/device. 2993 * The buffer's B_DONE bit will be cleared on return, making it almost 2994 * ready for an I/O initiation. B_INVAL may or may not be set on 2995 * return. The caller should clear B_INVAL prior to initiating a 2996 * READ. 2997 * 2998 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for 2999 * an existing buffer. 3000 * 3001 * For a VMIO buffer, B_CACHE is modified according to the backing VM. 3002 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set 3003 * and then cleared based on the backing VM. If the previous buffer is 3004 * non-0-sized but invalid, B_CACHE will be cleared. 3005 * 3006 * If getblk() must create a new buffer, the new buffer is returned with 3007 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which 3008 * case it is returned with B_INVAL clear and B_CACHE set based on the 3009 * backing VM. 3010 * 3011 * getblk() also forces a bwrite() for any B_DELWRI buffer whose 3012 * B_CACHE bit is clear. 3013 * 3014 * What this means, basically, is that the caller should use B_CACHE to 3015 * determine whether the buffer is fully valid or not and should clear 3016 * B_INVAL prior to issuing a read. If the caller intends to validate 3017 * the buffer by loading its data area with something, the caller needs 3018 * to clear B_INVAL. If the caller does this without issuing an I/O, 3019 * the caller should set B_CACHE ( as an optimization ), else the caller 3020 * should issue the I/O and biodone() will set B_CACHE if the I/O was 3021 * a write attempt or if it was a successful read.
If the caller 3022 * intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR 3023 * prior to issuing the READ. biodone() will *not* clear B_INVAL. 3024 */ 3025 struct buf * 3026 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo, 3027 int flags) 3028 { 3029 struct buf *bp; 3030 struct bufobj *bo; 3031 int bsize, error, maxsize, vmio; 3032 off_t offset; 3033 3034 CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size); 3035 KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC, 3036 ("GB_KVAALLOC only makes sense with GB_UNMAPPED")); 3037 ASSERT_VOP_LOCKED(vp, "getblk"); 3038 if (size > MAXBSIZE) 3039 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 3040 if (!unmapped_buf_allowed) 3041 flags &= ~(GB_UNMAPPED | GB_KVAALLOC); 3042 3043 bo = &vp->v_bufobj; 3044 loop: 3045 BO_RLOCK(bo); 3046 bp = gbincore(bo, blkno); 3047 if (bp != NULL) { 3048 int lockflags; 3049 /* 3050 * Buffer is in-core. If the buffer is not busy nor managed, 3051 * it must be on a queue. 3052 */ 3053 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK; 3054 3055 if (flags & GB_LOCK_NOWAIT) 3056 lockflags |= LK_NOWAIT; 3057 3058 error = BUF_TIMELOCK(bp, lockflags, 3059 BO_LOCKPTR(bo), "getblk", slpflag, slptimeo); 3060 3061 /* 3062 * If we slept and got the lock we have to restart in case 3063 * the buffer changed identities. 3064 */ 3065 if (error == ENOLCK) 3066 goto loop; 3067 /* We timed out or were interrupted. */ 3068 else if (error) 3069 return (NULL); 3070 /* If recursed, assume caller knows the rules. */ 3071 else if (BUF_LOCKRECURSED(bp)) 3072 goto end; 3073 3074 /* 3075 * The buffer is locked. B_CACHE is cleared if the buffer is 3076 * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set 3077 * and for a VMIO buffer B_CACHE is adjusted according to the 3078 * backing VM cache. 3079 */ 3080 if (bp->b_flags & B_INVAL) 3081 bp->b_flags &= ~B_CACHE; 3082 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0) 3083 bp->b_flags |= B_CACHE; 3084 if (bp->b_flags & B_MANAGED) 3085 MPASS(bp->b_qindex == QUEUE_NONE); 3086 else 3087 bremfree(bp); 3088 3089 /* 3090 * check for size inconsistencies for non-VMIO case. 3091 */ 3092 if (bp->b_bcount != size) { 3093 if ((bp->b_flags & B_VMIO) == 0 || 3094 (size > bp->b_kvasize)) { 3095 if (bp->b_flags & B_DELWRI) { 3096 /* 3097 * If buffer is pinned and caller does 3098 * not want sleep waiting for it to be 3099 * unpinned, bail out 3100 * */ 3101 if (bp->b_pin_count > 0) { 3102 if (flags & GB_LOCK_NOWAIT) { 3103 bqrelse(bp); 3104 return (NULL); 3105 } else { 3106 bunpin_wait(bp); 3107 } 3108 } 3109 bp->b_flags |= B_NOCACHE; 3110 bwrite(bp); 3111 } else { 3112 if (LIST_EMPTY(&bp->b_dep)) { 3113 bp->b_flags |= B_RELBUF; 3114 brelse(bp); 3115 } else { 3116 bp->b_flags |= B_NOCACHE; 3117 bwrite(bp); 3118 } 3119 } 3120 goto loop; 3121 } 3122 } 3123 3124 /* 3125 * Handle the case of unmapped buffer which should 3126 * become mapped, or the buffer for which KVA 3127 * reservation is requested. 3128 */ 3129 bp_unmapped_get_kva(bp, blkno, size, flags); 3130 3131 /* 3132 * If the size is inconsistant in the VMIO case, we can resize 3133 * the buffer. This might lead to B_CACHE getting set or 3134 * cleared. If the size has not changed, B_CACHE remains 3135 * unchanged from its previous state. 
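 *
 * For instance (sketch), a later getblk(vp, blkno, 8192, ...) against a
 * block that was instantiated at 4096 bytes just falls through to
 * allocbuf(bp, 8192) below; the extra pages are grabbed from the VM
 * object and B_CACHE is cleared if any of them are not already valid.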
3136 */ 3137 if (bp->b_bcount != size) 3138 allocbuf(bp, size); 3139 3140 KASSERT(bp->b_offset != NOOFFSET, 3141 ("getblk: no buffer offset")); 3142 3143 /* 3144 * A buffer with B_DELWRI set and B_CACHE clear must 3145 * be committed before we can return the buffer in 3146 * order to prevent the caller from issuing a read 3147 * ( due to B_CACHE not being set ) and overwriting 3148 * it. 3149 * 3150 * Most callers, including NFS and FFS, need this to 3151 * operate properly either because they assume they 3152 * can issue a read if B_CACHE is not set, or because 3153 * ( for example ) an uncached B_DELWRI might loop due 3154 * to softupdates re-dirtying the buffer. In the latter 3155 * case, B_CACHE is set after the first write completes, 3156 * preventing further loops. 3157 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE 3158 * above while extending the buffer, we cannot allow the 3159 * buffer to remain with B_CACHE set after the write 3160 * completes or it will represent a corrupt state. To 3161 * deal with this we set B_NOCACHE to scrap the buffer 3162 * after the write. 3163 * 3164 * We might be able to do something fancy, like setting 3165 * B_CACHE in bwrite() except if B_DELWRI is already set, 3166 * so the below call doesn't set B_CACHE, but that gets real 3167 * confusing. This is much easier. 3168 */ 3169 3170 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 3171 bp->b_flags |= B_NOCACHE; 3172 bwrite(bp); 3173 goto loop; 3174 } 3175 bp->b_flags &= ~B_DONE; 3176 } else { 3177 /* 3178 * Buffer is not in-core, create new buffer. The buffer 3179 * returned by getnewbuf() is locked. Note that the returned 3180 * buffer is also considered valid (not marked B_INVAL). 3181 */ 3182 BO_RUNLOCK(bo); 3183 /* 3184 * If the user does not want us to create the buffer, bail out 3185 * here. 3186 */ 3187 if (flags & GB_NOCREAT) 3188 return NULL; 3189 if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread)) 3190 return NULL; 3191 3192 bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize; 3193 offset = blkno * bsize; 3194 vmio = vp->v_object != NULL; 3195 if (vmio) { 3196 maxsize = size + (offset & PAGE_MASK); 3197 } else { 3198 maxsize = size; 3199 /* Do not allow non-VMIO notmapped buffers. */ 3200 flags &= ~GB_UNMAPPED; 3201 } 3202 maxsize = imax(maxsize, bsize); 3203 3204 bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags); 3205 if (bp == NULL) { 3206 if (slpflag || slptimeo) 3207 return NULL; 3208 goto loop; 3209 } 3210 3211 /* 3212 * This code is used to make sure that a buffer is not 3213 * created while the getnewbuf routine is blocked. 3214 * This can be a problem whether the vnode is locked or not. 3215 * If the buffer is created out from under us, we have to 3216 * throw away the one we just created. 3217 * 3218 * Note: this must occur before we associate the buffer 3219 * with the vp especially considering limitations in 3220 * the splay tree implementation when dealing with duplicate 3221 * lblkno's. 3222 */ 3223 BO_LOCK(bo); 3224 if (gbincore(bo, blkno)) { 3225 BO_UNLOCK(bo); 3226 bp->b_flags |= B_INVAL; 3227 brelse(bp); 3228 goto loop; 3229 } 3230 3231 /* 3232 * Insert the buffer into the hash, so that it can 3233 * be found by incore. 3234 */ 3235 bp->b_blkno = bp->b_lblkno = blkno; 3236 bp->b_offset = offset; 3237 bgetvp(vp, bp); 3238 BO_UNLOCK(bo); 3239 3240 /* 3241 * set B_VMIO bit. allocbuf() the buffer bigger. 
Since the 3242 * buffer size starts out as 0, B_CACHE will be set by 3243 * allocbuf() for the VMIO case prior to it testing the 3244 * backing store for validity. 3245 */ 3246 3247 if (vmio) { 3248 bp->b_flags |= B_VMIO; 3249 KASSERT(vp->v_object == bp->b_bufobj->bo_object, 3250 ("ARGH! different b_bufobj->bo_object %p %p %p\n", 3251 bp, vp->v_object, bp->b_bufobj->bo_object)); 3252 } else { 3253 bp->b_flags &= ~B_VMIO; 3254 KASSERT(bp->b_bufobj->bo_object == NULL, 3255 ("ARGH! has b_bufobj->bo_object %p %p\n", 3256 bp, bp->b_bufobj->bo_object)); 3257 BUF_CHECK_MAPPED(bp); 3258 } 3259 3260 allocbuf(bp, size); 3261 bp->b_flags &= ~B_DONE; 3262 } 3263 CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp); 3264 BUF_ASSERT_HELD(bp); 3265 end: 3266 KASSERT(bp->b_bufobj == bo, 3267 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3268 return (bp); 3269 } 3270 3271 /* 3272 * Get an empty, disassociated buffer of given size. The buffer is initially 3273 * set to B_INVAL. 3274 */ 3275 struct buf * 3276 geteblk(int size, int flags) 3277 { 3278 struct buf *bp; 3279 int maxsize; 3280 3281 maxsize = (size + BKVAMASK) & ~BKVAMASK; 3282 while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) { 3283 if ((flags & GB_NOWAIT_BD) && 3284 (curthread->td_pflags & TDP_BUFNEED) != 0) 3285 return (NULL); 3286 } 3287 allocbuf(bp, size); 3288 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 3289 BUF_ASSERT_HELD(bp); 3290 return (bp); 3291 } 3292 3293 3294 /* 3295 * This code constitutes the buffer memory from either anonymous system 3296 * memory (in the case of non-VMIO operations) or from an associated 3297 * VM object (in the case of VMIO operations). This code is able to 3298 * resize a buffer up or down. 3299 * 3300 * Note that this code is tricky, and has many complications to resolve 3301 * deadlock or inconsistant data situations. Tread lightly!!! 3302 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 3303 * the caller. Calling this code willy nilly can result in the loss of data. 3304 * 3305 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 3306 * B_CACHE for the non-VMIO case. 3307 */ 3308 3309 int 3310 allocbuf(struct buf *bp, int size) 3311 { 3312 int newbsize, mbsize; 3313 int i; 3314 3315 BUF_ASSERT_HELD(bp); 3316 3317 if (bp->b_kvasize < size) 3318 panic("allocbuf: buffer too small"); 3319 3320 if ((bp->b_flags & B_VMIO) == 0) { 3321 caddr_t origbuf; 3322 int origbufsize; 3323 /* 3324 * Just get anonymous memory from the kernel. Don't 3325 * mess with B_CACHE. 3326 */ 3327 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 3328 if (bp->b_flags & B_MALLOC) 3329 newbsize = mbsize; 3330 else 3331 newbsize = round_page(size); 3332 3333 if (newbsize < bp->b_bufsize) { 3334 /* 3335 * malloced buffers are not shrunk 3336 */ 3337 if (bp->b_flags & B_MALLOC) { 3338 if (newbsize) { 3339 bp->b_bcount = size; 3340 } else { 3341 free(bp->b_data, M_BIOBUF); 3342 if (bp->b_bufsize) { 3343 atomic_subtract_long( 3344 &bufmallocspace, 3345 bp->b_bufsize); 3346 bufspacewakeup(); 3347 bp->b_bufsize = 0; 3348 } 3349 bp->b_saveaddr = bp->b_kvabase; 3350 bp->b_data = bp->b_saveaddr; 3351 bp->b_bcount = 0; 3352 bp->b_flags &= ~B_MALLOC; 3353 } 3354 return 1; 3355 } 3356 vm_hold_free_pages(bp, newbsize); 3357 } else if (newbsize > bp->b_bufsize) { 3358 /* 3359 * We only use malloced memory on the first allocation. 3360 * and revert to page-allocated memory when the buffer 3361 * grows. 
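 *
 * Concretely (assuming 4kB pages): a fresh 1kB buffer is carved out of
 * malloc()ed memory and marked B_MALLOC, since mbsize <= PAGE_SIZE / 2;
 * if it later grows, pages are wired in through vm_hold_load_pages(),
 * the old contents are bcopy()ed over, and the malloced copy is freed.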
3362 */ 3363 /* 3364 * There is a potential smp race here that could lead 3365 * to bufmallocspace slightly passing the max. It 3366 * is probably extremely rare and not worth worrying 3367 * over. 3368 */ 3369 if ( (bufmallocspace < maxbufmallocspace) && 3370 (bp->b_bufsize == 0) && 3371 (mbsize <= PAGE_SIZE/2)) { 3372 3373 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 3374 bp->b_bufsize = mbsize; 3375 bp->b_bcount = size; 3376 bp->b_flags |= B_MALLOC; 3377 atomic_add_long(&bufmallocspace, mbsize); 3378 return 1; 3379 } 3380 origbuf = NULL; 3381 origbufsize = 0; 3382 /* 3383 * If the buffer is growing on its other-than-first allocation, 3384 * then we revert to the page-allocation scheme. 3385 */ 3386 if (bp->b_flags & B_MALLOC) { 3387 origbuf = bp->b_data; 3388 origbufsize = bp->b_bufsize; 3389 bp->b_data = bp->b_kvabase; 3390 if (bp->b_bufsize) { 3391 atomic_subtract_long(&bufmallocspace, 3392 bp->b_bufsize); 3393 bufspacewakeup(); 3394 bp->b_bufsize = 0; 3395 } 3396 bp->b_flags &= ~B_MALLOC; 3397 newbsize = round_page(newbsize); 3398 } 3399 vm_hold_load_pages( 3400 bp, 3401 (vm_offset_t) bp->b_data + bp->b_bufsize, 3402 (vm_offset_t) bp->b_data + newbsize); 3403 if (origbuf) { 3404 bcopy(origbuf, bp->b_data, origbufsize); 3405 free(origbuf, M_BIOBUF); 3406 } 3407 } 3408 } else { 3409 int desiredpages; 3410 3411 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 3412 desiredpages = (size == 0) ? 0 : 3413 num_pages((bp->b_offset & PAGE_MASK) + newbsize); 3414 3415 if (bp->b_flags & B_MALLOC) 3416 panic("allocbuf: VMIO buffer can't be malloced"); 3417 /* 3418 * Set B_CACHE initially if buffer is 0 length or will become 3419 * 0-length. 3420 */ 3421 if (size == 0 || bp->b_bufsize == 0) 3422 bp->b_flags |= B_CACHE; 3423 3424 if (newbsize < bp->b_bufsize) { 3425 /* 3426 * DEV_BSIZE aligned new buffer size is less then the 3427 * DEV_BSIZE aligned existing buffer size. Figure out 3428 * if we have to remove any pages. 3429 */ 3430 if (desiredpages < bp->b_npages) { 3431 vm_page_t m; 3432 3433 if ((bp->b_flags & B_UNMAPPED) == 0) { 3434 BUF_CHECK_MAPPED(bp); 3435 pmap_qremove((vm_offset_t)trunc_page( 3436 (vm_offset_t)bp->b_data) + 3437 (desiredpages << PAGE_SHIFT), 3438 (bp->b_npages - desiredpages)); 3439 } else 3440 BUF_CHECK_UNMAPPED(bp); 3441 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 3442 for (i = desiredpages; i < bp->b_npages; i++) { 3443 /* 3444 * the page is not freed here -- it 3445 * is the responsibility of 3446 * vnode_pager_setsize 3447 */ 3448 m = bp->b_pages[i]; 3449 KASSERT(m != bogus_page, 3450 ("allocbuf: bogus page found")); 3451 while (vm_page_sleep_if_busy(m, 3452 "biodep")) 3453 continue; 3454 3455 bp->b_pages[i] = NULL; 3456 vm_page_lock(m); 3457 vm_page_unwire(m, 0); 3458 vm_page_unlock(m); 3459 } 3460 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 3461 bp->b_npages = desiredpages; 3462 } 3463 } else if (size > bp->b_bcount) { 3464 /* 3465 * We are growing the buffer, possibly in a 3466 * byte-granular fashion. 3467 */ 3468 vm_object_t obj; 3469 vm_offset_t toff; 3470 vm_offset_t tinc; 3471 3472 /* 3473 * Step 1, bring in the VM pages from the object, 3474 * allocating them if necessary. We must clear 3475 * B_CACHE if these pages are not valid for the 3476 * range covered by the buffer. 
3477 */ 3478 3479 obj = bp->b_bufobj->bo_object; 3480 3481 VM_OBJECT_WLOCK(obj); 3482 while (bp->b_npages < desiredpages) { 3483 vm_page_t m; 3484 3485 /* 3486 * We must allocate system pages since blocking 3487 * here could interfere with paging I/O, no 3488 * matter which process we are. 3489 * 3490 * Only exclusive busy can be tested here. 3491 * Blocking on shared busy might lead to 3492 * deadlocks once allocbuf() is called after 3493 * pages are vfs_busy_pages(). 3494 */ 3495 m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + 3496 bp->b_npages, VM_ALLOC_NOBUSY | 3497 VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | 3498 VM_ALLOC_IGN_SBUSY | 3499 VM_ALLOC_COUNT(desiredpages - bp->b_npages)); 3500 if (m->valid == 0) 3501 bp->b_flags &= ~B_CACHE; 3502 bp->b_pages[bp->b_npages] = m; 3503 ++bp->b_npages; 3504 } 3505 3506 /* 3507 * Step 2. We've loaded the pages into the buffer, 3508 * we have to figure out if we can still have B_CACHE 3509 * set. Note that B_CACHE is set according to the 3510 * byte-granular range ( bcount and size ), new the 3511 * aligned range ( newbsize ). 3512 * 3513 * The VM test is against m->valid, which is DEV_BSIZE 3514 * aligned. Needless to say, the validity of the data 3515 * needs to also be DEV_BSIZE aligned. Note that this 3516 * fails with NFS if the server or some other client 3517 * extends the file's EOF. If our buffer is resized, 3518 * B_CACHE may remain set! XXX 3519 */ 3520 3521 toff = bp->b_bcount; 3522 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK); 3523 3524 while ((bp->b_flags & B_CACHE) && toff < size) { 3525 vm_pindex_t pi; 3526 3527 if (tinc > (size - toff)) 3528 tinc = size - toff; 3529 3530 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 3531 PAGE_SHIFT; 3532 3533 vfs_buf_test_cache( 3534 bp, 3535 bp->b_offset, 3536 toff, 3537 tinc, 3538 bp->b_pages[pi] 3539 ); 3540 toff += tinc; 3541 tinc = PAGE_SIZE; 3542 } 3543 VM_OBJECT_WUNLOCK(obj); 3544 3545 /* 3546 * Step 3, fixup the KVM pmap. 3547 */ 3548 if ((bp->b_flags & B_UNMAPPED) == 0) 3549 bpmap_qenter(bp); 3550 else 3551 BUF_CHECK_UNMAPPED(bp); 3552 } 3553 } 3554 if (newbsize < bp->b_bufsize) 3555 bufspacewakeup(); 3556 bp->b_bufsize = newbsize; /* actual buffer allocation */ 3557 bp->b_bcount = size; /* requested buffer size */ 3558 return 1; 3559 } 3560 3561 extern int inflight_transient_maps; 3562 3563 void 3564 biodone(struct bio *bp) 3565 { 3566 struct mtx *mtxp; 3567 void (*done)(struct bio *); 3568 vm_offset_t start, end; 3569 int transient; 3570 3571 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3572 mtx_lock(mtxp); 3573 bp->bio_flags |= BIO_DONE; 3574 if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) { 3575 start = trunc_page((vm_offset_t)bp->bio_data); 3576 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length); 3577 transient = 1; 3578 } else { 3579 transient = 0; 3580 start = end = 0; 3581 } 3582 done = bp->bio_done; 3583 if (done == NULL) 3584 wakeup(bp); 3585 mtx_unlock(mtxp); 3586 if (done != NULL) 3587 done(bp); 3588 if (transient) { 3589 pmap_qremove(start, OFF_TO_IDX(end - start)); 3590 vmem_free(transient_arena, start, end - start); 3591 atomic_add_int(&inflight_transient_maps, -1); 3592 } 3593 } 3594 3595 /* 3596 * Wait for a BIO to finish. 3597 * 3598 * XXX: resort to a timeout for now. The optimal locking (if any) for this 3599 * case is not yet clear. 
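 *
 * A minimal synchronous consumer looks roughly like this (a sketch only;
 * bip, offset, length, ptr and csw are illustrative, and the real
 * consumers live in GEOM and the device layer).  With bio_done left
 * NULL, biodone() does a wakeup() on the bio and biowait() returns:
 *
 *        bip = g_new_bio();
 *        bip->bio_cmd = BIO_READ;
 *        bip->bio_offset = offset;
 *        bip->bio_length = length;
 *        bip->bio_data = ptr;
 *        bip->bio_done = NULL;
 *        (*csw->d_strategy)(bip);
 *        error = biowait(bip, "biowt");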
3600 */ 3601 int 3602 biowait(struct bio *bp, const char *wchan) 3603 { 3604 struct mtx *mtxp; 3605 3606 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3607 mtx_lock(mtxp); 3608 while ((bp->bio_flags & BIO_DONE) == 0) 3609 msleep(bp, mtxp, PRIBIO, wchan, hz / 10); 3610 mtx_unlock(mtxp); 3611 if (bp->bio_error != 0) 3612 return (bp->bio_error); 3613 if (!(bp->bio_flags & BIO_ERROR)) 3614 return (0); 3615 return (EIO); 3616 } 3617 3618 void 3619 biofinish(struct bio *bp, struct devstat *stat, int error) 3620 { 3621 3622 if (error) { 3623 bp->bio_error = error; 3624 bp->bio_flags |= BIO_ERROR; 3625 } 3626 if (stat != NULL) 3627 devstat_end_transaction_bio(stat, bp); 3628 biodone(bp); 3629 } 3630 3631 /* 3632 * bufwait: 3633 * 3634 * Wait for buffer I/O completion, returning error status. The buffer 3635 * is left locked and B_DONE on return. B_EINTR is converted into an EINTR 3636 * error and cleared. 3637 */ 3638 int 3639 bufwait(struct buf *bp) 3640 { 3641 if (bp->b_iocmd == BIO_READ) 3642 bwait(bp, PRIBIO, "biord"); 3643 else 3644 bwait(bp, PRIBIO, "biowr"); 3645 if (bp->b_flags & B_EINTR) { 3646 bp->b_flags &= ~B_EINTR; 3647 return (EINTR); 3648 } 3649 if (bp->b_ioflags & BIO_ERROR) { 3650 return (bp->b_error ? bp->b_error : EIO); 3651 } else { 3652 return (0); 3653 } 3654 } 3655 3656 /* 3657 * Call back function from struct bio back up to struct buf. 3658 */ 3659 static void 3660 bufdonebio(struct bio *bip) 3661 { 3662 struct buf *bp; 3663 3664 bp = bip->bio_caller2; 3665 bp->b_resid = bp->b_bcount - bip->bio_completed; 3666 bp->b_resid = bip->bio_resid; /* XXX: remove */ 3667 bp->b_ioflags = bip->bio_flags; 3668 bp->b_error = bip->bio_error; 3669 if (bp->b_error) 3670 bp->b_ioflags |= BIO_ERROR; 3671 bufdone(bp); 3672 g_destroy_bio(bip); 3673 } 3674 3675 void 3676 dev_strategy(struct cdev *dev, struct buf *bp) 3677 { 3678 struct cdevsw *csw; 3679 int ref; 3680 3681 KASSERT(dev->si_refcount > 0, 3682 ("dev_strategy on un-referenced struct cdev *(%s) %p", 3683 devtoname(dev), dev)); 3684 3685 csw = dev_refthread(dev, &ref); 3686 dev_strategy_csw(dev, csw, bp); 3687 dev_relthread(dev, ref); 3688 } 3689 3690 void 3691 dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp) 3692 { 3693 struct bio *bip; 3694 3695 KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE, 3696 ("b_iocmd botch")); 3697 KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) || 3698 dev->si_threadcount > 0, 3699 ("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev), 3700 dev)); 3701 if (csw == NULL) { 3702 bp->b_error = ENXIO; 3703 bp->b_ioflags = BIO_ERROR; 3704 bufdone(bp); 3705 return; 3706 } 3707 for (;;) { 3708 bip = g_new_bio(); 3709 if (bip != NULL) 3710 break; 3711 /* Try again later */ 3712 tsleep(&bp, PRIBIO, "dev_strat", hz/10); 3713 } 3714 bip->bio_cmd = bp->b_iocmd; 3715 bip->bio_offset = bp->b_iooffset; 3716 bip->bio_length = bp->b_bcount; 3717 bip->bio_bcount = bp->b_bcount; /* XXX: remove */ 3718 bdata2bio(bp, bip); 3719 bip->bio_done = bufdonebio; 3720 bip->bio_caller2 = bp; 3721 bip->bio_dev = dev; 3722 (*csw->d_strategy)(bip); 3723 } 3724 3725 /* 3726 * bufdone: 3727 * 3728 * Finish I/O on a buffer, optionally calling a completion function. 3729 * This is usually called from an interrupt so process blocking is 3730 * not allowed. 3731 * 3732 * biodone is also responsible for setting B_CACHE in a B_VMIO bp. 3733 * In a non-VMIO bp, B_CACHE will be set on the next getblk() 3734 * assuming B_INVAL is clear. 
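 *
 * For orientation, the synchronous read cycle that ends up here looks
 * roughly like the following (a sketch of the usual caller pattern, not
 * a prescription):
 *
 *        bp = getblk(vp, blkno, size, 0, 0, 0);
 *        if ((bp->b_flags & B_CACHE) == 0) {
 *                bp->b_iocmd = BIO_READ;
 *                bp->b_flags &= ~B_INVAL;
 *                bp->b_ioflags &= ~BIO_ERROR;
 *                vfs_busy_pages(bp, 0);
 *                bstrategy(bp);
 *                error = bufwait(bp);
 *        }
 *        brelse(bp);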
3735 * 3736 * For the VMIO case, we set B_CACHE if the op was a read and no 3737 * read error occured, or if the op was a write. B_CACHE is never 3738 * set if the buffer is invalid or otherwise uncacheable. 3739 * 3740 * biodone does not mess with B_INVAL, allowing the I/O routine or the 3741 * initiator to leave B_INVAL set to brelse the buffer out of existance 3742 * in the biodone routine. 3743 */ 3744 void 3745 bufdone(struct buf *bp) 3746 { 3747 struct bufobj *dropobj; 3748 void (*biodone)(struct buf *); 3749 3750 CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 3751 dropobj = NULL; 3752 3753 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp)); 3754 BUF_ASSERT_HELD(bp); 3755 3756 runningbufwakeup(bp); 3757 if (bp->b_iocmd == BIO_WRITE) 3758 dropobj = bp->b_bufobj; 3759 /* call optional completion function if requested */ 3760 if (bp->b_iodone != NULL) { 3761 biodone = bp->b_iodone; 3762 bp->b_iodone = NULL; 3763 (*biodone) (bp); 3764 if (dropobj) 3765 bufobj_wdrop(dropobj); 3766 return; 3767 } 3768 3769 bufdone_finish(bp); 3770 3771 if (dropobj) 3772 bufobj_wdrop(dropobj); 3773 } 3774 3775 void 3776 bufdone_finish(struct buf *bp) 3777 { 3778 BUF_ASSERT_HELD(bp); 3779 3780 if (!LIST_EMPTY(&bp->b_dep)) 3781 buf_complete(bp); 3782 3783 if (bp->b_flags & B_VMIO) { 3784 vm_ooffset_t foff; 3785 vm_page_t m; 3786 vm_object_t obj; 3787 struct vnode *vp; 3788 int bogus, i, iosize; 3789 3790 obj = bp->b_bufobj->bo_object; 3791 KASSERT(obj->paging_in_progress >= bp->b_npages, 3792 ("biodone_finish: paging in progress(%d) < b_npages(%d)", 3793 obj->paging_in_progress, bp->b_npages)); 3794 3795 vp = bp->b_vp; 3796 KASSERT(vp->v_holdcnt > 0, 3797 ("biodone_finish: vnode %p has zero hold count", vp)); 3798 KASSERT(vp->v_object != NULL, 3799 ("biodone_finish: vnode %p has no vm_object", vp)); 3800 3801 foff = bp->b_offset; 3802 KASSERT(bp->b_offset != NOOFFSET, 3803 ("biodone_finish: bp %p has no buffer offset", bp)); 3804 3805 /* 3806 * Set B_CACHE if the op was a normal read and no error 3807 * occured. B_CACHE is set for writes in the b*write() 3808 * routines. 3809 */ 3810 iosize = bp->b_bcount - bp->b_resid; 3811 if (bp->b_iocmd == BIO_READ && 3812 !(bp->b_flags & (B_INVAL|B_NOCACHE)) && 3813 !(bp->b_ioflags & BIO_ERROR)) { 3814 bp->b_flags |= B_CACHE; 3815 } 3816 bogus = 0; 3817 VM_OBJECT_WLOCK(obj); 3818 for (i = 0; i < bp->b_npages; i++) { 3819 int bogusflag = 0; 3820 int resid; 3821 3822 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; 3823 if (resid > iosize) 3824 resid = iosize; 3825 3826 /* 3827 * cleanup bogus pages, restoring the originals 3828 */ 3829 m = bp->b_pages[i]; 3830 if (m == bogus_page) { 3831 bogus = bogusflag = 1; 3832 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 3833 if (m == NULL) 3834 panic("biodone: page disappeared!"); 3835 bp->b_pages[i] = m; 3836 } 3837 KASSERT(OFF_TO_IDX(foff) == m->pindex, 3838 ("biodone_finish: foff(%jd)/pindex(%ju) mismatch", 3839 (intmax_t)foff, (uintmax_t)m->pindex)); 3840 3841 /* 3842 * In the write case, the valid and clean bits are 3843 * already changed correctly ( see bdwrite() ), so we 3844 * only need to do this here in the read case. 
3845 */ 3846 if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) { 3847 KASSERT((m->dirty & vm_page_bits(foff & 3848 PAGE_MASK, resid)) == 0, ("bufdone_finish:" 3849 " page %p has unexpected dirty bits", m)); 3850 vfs_page_set_valid(bp, foff, m); 3851 } 3852 3853 vm_page_sunbusy(m); 3854 vm_object_pip_subtract(obj, 1); 3855 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3856 iosize -= resid; 3857 } 3858 vm_object_pip_wakeupn(obj, 0); 3859 VM_OBJECT_WUNLOCK(obj); 3860 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) { 3861 BUF_CHECK_MAPPED(bp); 3862 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3863 bp->b_pages, bp->b_npages); 3864 } 3865 } 3866 3867 /* 3868 * For asynchronous completions, release the buffer now. The brelse 3869 * will do a wakeup there if necessary - so no need to do a wakeup 3870 * here in the async case. The sync case always needs to do a wakeup. 3871 */ 3872 3873 if (bp->b_flags & B_ASYNC) { 3874 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR)) 3875 brelse(bp); 3876 else 3877 bqrelse(bp); 3878 } else 3879 bdone(bp); 3880 } 3881 3882 /* 3883 * This routine is called in lieu of iodone in the case of 3884 * incomplete I/O. This keeps the busy status for pages 3885 * consistant. 3886 */ 3887 void 3888 vfs_unbusy_pages(struct buf *bp) 3889 { 3890 int i; 3891 vm_object_t obj; 3892 vm_page_t m; 3893 3894 runningbufwakeup(bp); 3895 if (!(bp->b_flags & B_VMIO)) 3896 return; 3897 3898 obj = bp->b_bufobj->bo_object; 3899 VM_OBJECT_WLOCK(obj); 3900 for (i = 0; i < bp->b_npages; i++) { 3901 m = bp->b_pages[i]; 3902 if (m == bogus_page) { 3903 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 3904 if (!m) 3905 panic("vfs_unbusy_pages: page missing\n"); 3906 bp->b_pages[i] = m; 3907 if ((bp->b_flags & B_UNMAPPED) == 0) { 3908 BUF_CHECK_MAPPED(bp); 3909 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3910 bp->b_pages, bp->b_npages); 3911 } else 3912 BUF_CHECK_UNMAPPED(bp); 3913 } 3914 vm_object_pip_subtract(obj, 1); 3915 vm_page_sunbusy(m); 3916 } 3917 vm_object_pip_wakeupn(obj, 0); 3918 VM_OBJECT_WUNLOCK(obj); 3919 } 3920 3921 /* 3922 * vfs_page_set_valid: 3923 * 3924 * Set the valid bits in a page based on the supplied offset. The 3925 * range is restricted to the buffer's size. 3926 * 3927 * This routine is typically called after a read completes. 3928 */ 3929 static void 3930 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m) 3931 { 3932 vm_ooffset_t eoff; 3933 3934 /* 3935 * Compute the end offset, eoff, such that [off, eoff) does not span a 3936 * page boundary and eoff is not greater than the end of the buffer. 3937 * The end of the buffer, in this case, is our file EOF, not the 3938 * allocation size of the buffer. 3939 */ 3940 eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK; 3941 if (eoff > bp->b_offset + bp->b_bcount) 3942 eoff = bp->b_offset + bp->b_bcount; 3943 3944 /* 3945 * Set valid range. This is typically the entire buffer and thus the 3946 * entire page. 3947 */ 3948 if (eoff > off) 3949 vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off); 3950 } 3951 3952 /* 3953 * vfs_page_set_validclean: 3954 * 3955 * Set the valid bits and clear the dirty bits in a page based on the 3956 * supplied offset. The range is restricted to the buffer's size. 3957 */ 3958 static void 3959 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m) 3960 { 3961 vm_ooffset_t soff, eoff; 3962 3963 /* 3964 * Start and end offsets in buffer. 
3952 /*
3953 * vfs_page_set_validclean:
3954 *
3955 * Set the valid bits and clear the dirty bits in a page based on the
3956 * supplied offset. The range is restricted to the buffer's size.
3957 */
3958 static void
3959 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
3960 {
3961 vm_ooffset_t soff, eoff;
3962
3963 /*
3964 * Start and end offsets in buffer. eoff - soff may not cross a
3965 * page boundary or cross the end of the buffer. The end of the
3966 * buffer, in this case, is our file EOF, not the allocation size
3967 * of the buffer.
3968 */
3969 soff = off;
3970 eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3971 if (eoff > bp->b_offset + bp->b_bcount)
3972 eoff = bp->b_offset + bp->b_bcount;
3973
3974 /*
3975 * Set valid range. This is typically the entire buffer and thus the
3976 * entire page.
3977 */
3978 if (eoff > soff) {
3979 vm_page_set_validclean(
3980 m,
3981 (vm_offset_t) (soff & PAGE_MASK),
3982 (vm_offset_t) (eoff - soff)
3983 );
3984 }
3985 }
3986
3987 /*
3988 * Ensure that all buffer pages are not exclusive busied. If any page is
3989 * exclusive busy, drain it.
3990 */
3991 void
3992 vfs_drain_busy_pages(struct buf *bp)
3993 {
3994 vm_page_t m;
3995 int i, last_busied;
3996
3997 VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
3998 last_busied = 0;
3999 for (i = 0; i < bp->b_npages; i++) {
4000 m = bp->b_pages[i];
4001 if (vm_page_xbusied(m)) {
4002 for (; last_busied < i; last_busied++)
4003 vm_page_sbusy(bp->b_pages[last_busied]);
4004 while (vm_page_xbusied(m)) {
4005 vm_page_lock(m);
4006 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4007 vm_page_busy_sleep(m, "vbpage");
4008 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4009 }
4010 }
4011 }
4012 for (i = 0; i < last_busied; i++)
4013 vm_page_sunbusy(bp->b_pages[i]);
4014 }
4015
4016 /*
4017 * This routine is called before a device strategy routine.
4018 * It is used to tell the VM system that paging I/O is in
4019 * progress, and treat the pages associated with the buffer
4020 * almost as being exclusive busy. Also the object paging_in_progress
4021 * flag is handled to make sure that the object doesn't become
4022 * inconsistent.
4023 *
4024 * Since I/O has not been initiated yet, certain buffer flags
4025 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4026 * and should be ignored.
4027 */
4028 void
4029 vfs_busy_pages(struct buf *bp, int clear_modify)
4030 {
4031 int i, bogus;
4032 vm_object_t obj;
4033 vm_ooffset_t foff;
4034 vm_page_t m;
4035
4036 if (!(bp->b_flags & B_VMIO))
4037 return;
4038
4039 obj = bp->b_bufobj->bo_object;
4040 foff = bp->b_offset;
4041 KASSERT(bp->b_offset != NOOFFSET,
4042 ("vfs_busy_pages: no buffer offset"));
4043 VM_OBJECT_WLOCK(obj);
4044 vfs_drain_busy_pages(bp);
4045 if (bp->b_bufsize != 0)
4046 vfs_setdirty_locked_object(bp);
4047 bogus = 0;
4048 for (i = 0; i < bp->b_npages; i++) {
4049 m = bp->b_pages[i];
4050
4051 if ((bp->b_flags & B_CLUSTER) == 0) {
4052 vm_object_pip_add(obj, 1);
4053 vm_page_sbusy(m);
4054 }
4055 /*
4056 * When readying a buffer for a read ( i.e.
4057 * clear_modify == 0 ), it is important to do
4058 * bogus_page replacement for valid pages in
4059 * partially instantiated buffers. Partially
4060 * instantiated buffers can, in turn, occur when
4061 * reconstituting a buffer from its VM backing store
4062 * base. We only have to do this if B_CACHE is
4063 * clear ( which causes the I/O to occur in the
4064 * first place ). The replacement prevents the read
4065 * I/O from overwriting potentially dirty VM-backed
4066 * pages. XXX bogus page replacement is, uh, bogus.
4067 * It may not work properly with small-block devices.
4068 * We need to find a better way.
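 *
 * As a rough illustration (hypothetical numbers): a 32K buffer being
 * re-read with B_CACHE clear may already have, say, its third page
 * fully valid and dirty in the VM object; that page is swapped for
 * bogus_page below so the device read cannot clobber it, and
 * bufdone_finish() later looks the real page up again and restores
 * it into b_pages[] once the I/O completes.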
4069 */ 4070 if (clear_modify) { 4071 pmap_remove_write(m); 4072 vfs_page_set_validclean(bp, foff, m); 4073 } else if (m->valid == VM_PAGE_BITS_ALL && 4074 (bp->b_flags & B_CACHE) == 0) { 4075 bp->b_pages[i] = bogus_page; 4076 bogus++; 4077 } 4078 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 4079 } 4080 VM_OBJECT_WUNLOCK(obj); 4081 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) { 4082 BUF_CHECK_MAPPED(bp); 4083 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 4084 bp->b_pages, bp->b_npages); 4085 } 4086 } 4087 4088 /* 4089 * vfs_bio_set_valid: 4090 * 4091 * Set the range within the buffer to valid. The range is 4092 * relative to the beginning of the buffer, b_offset. Note that 4093 * b_offset itself may be offset from the beginning of the first 4094 * page. 4095 */ 4096 void 4097 vfs_bio_set_valid(struct buf *bp, int base, int size) 4098 { 4099 int i, n; 4100 vm_page_t m; 4101 4102 if (!(bp->b_flags & B_VMIO)) 4103 return; 4104 4105 /* 4106 * Fixup base to be relative to beginning of first page. 4107 * Set initial n to be the maximum number of bytes in the 4108 * first page that can be validated. 4109 */ 4110 base += (bp->b_offset & PAGE_MASK); 4111 n = PAGE_SIZE - (base & PAGE_MASK); 4112 4113 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 4114 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { 4115 m = bp->b_pages[i]; 4116 if (n > size) 4117 n = size; 4118 vm_page_set_valid_range(m, base & PAGE_MASK, n); 4119 base += n; 4120 size -= n; 4121 n = PAGE_SIZE; 4122 } 4123 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 4124 } 4125 4126 /* 4127 * vfs_bio_clrbuf: 4128 * 4129 * If the specified buffer is a non-VMIO buffer, clear the entire 4130 * buffer. If the specified buffer is a VMIO buffer, clear and 4131 * validate only the previously invalid portions of the buffer. 4132 * This routine essentially fakes an I/O, so we need to clear 4133 * BIO_ERROR and B_INVAL. 4134 * 4135 * Note that while we only theoretically need to clear through b_bcount, 4136 * we go ahead and clear through b_bufsize. 
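 *
 * For example (with DEV_BSIZE 512): a 2K buffer at the front of a
 * single page uses mask = (1 << 4) - 1 = 0xf; if all four valid bits
 * are set nothing is zeroed, if none are set the whole 2K is zeroed
 * in one call, and otherwise only the 512-byte pieces whose valid
 * bits are clear are zeroed.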
4137 */
4138 void
4139 vfs_bio_clrbuf(struct buf *bp)
4140 {
4141 int i, j, mask, sa, ea, slide;
4142
4143 if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4144 clrbuf(bp);
4145 return;
4146 }
4147 bp->b_flags &= ~B_INVAL;
4148 bp->b_ioflags &= ~BIO_ERROR;
4149 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4150 if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4151 (bp->b_offset & PAGE_MASK) == 0) {
4152 if (bp->b_pages[0] == bogus_page)
4153 goto unlock;
4154 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4155 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4156 if ((bp->b_pages[0]->valid & mask) == mask)
4157 goto unlock;
4158 if ((bp->b_pages[0]->valid & mask) == 0) {
4159 pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4160 bp->b_pages[0]->valid |= mask;
4161 goto unlock;
4162 }
4163 }
4164 sa = bp->b_offset & PAGE_MASK;
4165 slide = 0;
4166 for (i = 0; i < bp->b_npages; i++, sa = 0) {
4167 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4168 ea = slide & PAGE_MASK;
4169 if (ea == 0)
4170 ea = PAGE_SIZE;
4171 if (bp->b_pages[i] == bogus_page)
4172 continue;
4173 j = sa / DEV_BSIZE;
4174 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4175 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4176 if ((bp->b_pages[i]->valid & mask) == mask)
4177 continue;
4178 if ((bp->b_pages[i]->valid & mask) == 0)
4179 pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4180 else {
4181 for (; sa < ea; sa += DEV_BSIZE, j++) {
4182 if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4183 pmap_zero_page_area(bp->b_pages[i],
4184 sa, DEV_BSIZE);
4185 }
4186 }
4187 }
4188 bp->b_pages[i]->valid |= mask;
4189 }
4190 unlock:
4191 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4192 bp->b_resid = 0;
4193 }
4194
4195 void
4196 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4197 {
4198 vm_page_t m;
4199 int i, n;
4200
4201 if ((bp->b_flags & B_UNMAPPED) == 0) {
4202 BUF_CHECK_MAPPED(bp);
4203 bzero(bp->b_data + base, size);
4204 } else {
4205 BUF_CHECK_UNMAPPED(bp);
4206 n = PAGE_SIZE - (base & PAGE_MASK);
4207 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4208 m = bp->b_pages[i];
4209 if (n > size)
4210 n = size;
4211 pmap_zero_page_area(m, base & PAGE_MASK, n);
4212 base += n;
4213 size -= n;
4214 n = PAGE_SIZE;
4215 }
4216 }
4217 }
4218
4219 /*
4220 * vm_hold_load_pages and vm_hold_free_pages get pages into
4221 * a buffer's address space. The pages are anonymous and are
4222 * not associated with a file object.
4223 */
4224 static void
4225 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4226 {
4227 vm_offset_t pg;
4228 vm_page_t p;
4229 int index;
4230
4231 BUF_CHECK_MAPPED(bp);
4232
4233 to = round_page(to);
4234 from = round_page(from);
4235 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4236
4237 for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4238 tryagain:
4239 /*
4240 * note: must allocate system pages since blocking here
4241 * could interfere with paging I/O, no matter which
4242 * process we are.
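 *
 * (For example, growing a non-VMIO buffer whose b_data is page
 * aligned from 8K to 16K of backing store calls this with
 * from = b_data + 8K and to = b_data + 16K, so index starts at 2 and
 * two wired pages are allocated and entered into the buffer's KVA.)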
4243 */ 4244 p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ | 4245 VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT)); 4246 if (p == NULL) { 4247 VM_WAIT; 4248 goto tryagain; 4249 } 4250 pmap_qenter(pg, &p, 1); 4251 bp->b_pages[index] = p; 4252 } 4253 bp->b_npages = index; 4254 } 4255 4256 /* Return pages associated with this buf to the vm system */ 4257 static void 4258 vm_hold_free_pages(struct buf *bp, int newbsize) 4259 { 4260 vm_offset_t from; 4261 vm_page_t p; 4262 int index, newnpages; 4263 4264 BUF_CHECK_MAPPED(bp); 4265 4266 from = round_page((vm_offset_t)bp->b_data + newbsize); 4267 newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 4268 if (bp->b_npages > newnpages) 4269 pmap_qremove(from, bp->b_npages - newnpages); 4270 for (index = newnpages; index < bp->b_npages; index++) { 4271 p = bp->b_pages[index]; 4272 bp->b_pages[index] = NULL; 4273 if (vm_page_sbusied(p)) 4274 printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n", 4275 (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno); 4276 p->wire_count--; 4277 vm_page_free(p); 4278 atomic_subtract_int(&cnt.v_wire_count, 1); 4279 } 4280 bp->b_npages = newnpages; 4281 } 4282 4283 /* 4284 * Map an IO request into kernel virtual address space. 4285 * 4286 * All requests are (re)mapped into kernel VA space. 4287 * Notice that we use b_bufsize for the size of the buffer 4288 * to be mapped. b_bcount might be modified by the driver. 4289 * 4290 * Note that even if the caller determines that the address space should 4291 * be valid, a race or a smaller-file mapped into a larger space may 4292 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST 4293 * check the return value. 4294 */ 4295 int 4296 vmapbuf(struct buf *bp, int mapbuf) 4297 { 4298 caddr_t kva; 4299 vm_prot_t prot; 4300 int pidx; 4301 4302 if (bp->b_bufsize < 0) 4303 return (-1); 4304 prot = VM_PROT_READ; 4305 if (bp->b_iocmd == BIO_READ) 4306 prot |= VM_PROT_WRITE; /* Less backwards than it looks */ 4307 if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, 4308 (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages, 4309 btoc(MAXPHYS))) < 0) 4310 return (-1); 4311 bp->b_npages = pidx; 4312 if (mapbuf || !unmapped_buf_allowed) { 4313 pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx); 4314 kva = bp->b_saveaddr; 4315 bp->b_saveaddr = bp->b_data; 4316 bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK); 4317 bp->b_flags &= ~B_UNMAPPED; 4318 } else { 4319 bp->b_flags |= B_UNMAPPED; 4320 bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK; 4321 bp->b_saveaddr = bp->b_data; 4322 bp->b_data = unmapped_buf; 4323 } 4324 return(0); 4325 } 4326 4327 /* 4328 * Free the io map PTEs associated with this IO operation. 4329 * We also invalidate the TLB entries and restore the original b_addr. 
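 *
 * Callers are expected to pair this with a successful vmapbuf(),
 * roughly along these lines (sketch only, error handling omitted):
 *
 *	if (vmapbuf(bp, mapped) < 0)
 *		return (EFAULT);
 *	... start the transfer and wait for completion ...
 *	vunmapbuf(bp);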
4330 */ 4331 void 4332 vunmapbuf(struct buf *bp) 4333 { 4334 int npages; 4335 4336 npages = bp->b_npages; 4337 if (bp->b_flags & B_UNMAPPED) 4338 bp->b_flags &= ~B_UNMAPPED; 4339 else 4340 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); 4341 vm_page_unhold_pages(bp->b_pages, npages); 4342 4343 bp->b_data = bp->b_saveaddr; 4344 } 4345 4346 void 4347 bdone(struct buf *bp) 4348 { 4349 struct mtx *mtxp; 4350 4351 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4352 mtx_lock(mtxp); 4353 bp->b_flags |= B_DONE; 4354 wakeup(bp); 4355 mtx_unlock(mtxp); 4356 } 4357 4358 void 4359 bwait(struct buf *bp, u_char pri, const char *wchan) 4360 { 4361 struct mtx *mtxp; 4362 4363 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4364 mtx_lock(mtxp); 4365 while ((bp->b_flags & B_DONE) == 0) 4366 msleep(bp, mtxp, pri, wchan, 0); 4367 mtx_unlock(mtxp); 4368 } 4369 4370 int 4371 bufsync(struct bufobj *bo, int waitfor) 4372 { 4373 4374 return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread)); 4375 } 4376 4377 void 4378 bufstrategy(struct bufobj *bo, struct buf *bp) 4379 { 4380 int i = 0; 4381 struct vnode *vp; 4382 4383 vp = bp->b_vp; 4384 KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy")); 4385 KASSERT(vp->v_type != VCHR && vp->v_type != VBLK, 4386 ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp)); 4387 i = VOP_STRATEGY(vp, bp); 4388 KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp)); 4389 } 4390 4391 void 4392 bufobj_wrefl(struct bufobj *bo) 4393 { 4394 4395 KASSERT(bo != NULL, ("NULL bo in bufobj_wref")); 4396 ASSERT_BO_WLOCKED(bo); 4397 bo->bo_numoutput++; 4398 } 4399 4400 void 4401 bufobj_wref(struct bufobj *bo) 4402 { 4403 4404 KASSERT(bo != NULL, ("NULL bo in bufobj_wref")); 4405 BO_LOCK(bo); 4406 bo->bo_numoutput++; 4407 BO_UNLOCK(bo); 4408 } 4409 4410 void 4411 bufobj_wdrop(struct bufobj *bo) 4412 { 4413 4414 KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop")); 4415 BO_LOCK(bo); 4416 KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count")); 4417 if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) { 4418 bo->bo_flag &= ~BO_WWAIT; 4419 wakeup(&bo->bo_numoutput); 4420 } 4421 BO_UNLOCK(bo); 4422 } 4423 4424 int 4425 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo) 4426 { 4427 int error; 4428 4429 KASSERT(bo != NULL, ("NULL bo in bufobj_wwait")); 4430 ASSERT_BO_WLOCKED(bo); 4431 error = 0; 4432 while (bo->bo_numoutput) { 4433 bo->bo_flag |= BO_WWAIT; 4434 error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo), 4435 slpflag | (PRIBIO + 1), "bo_wwait", timeo); 4436 if (error) 4437 break; 4438 } 4439 return (error); 4440 } 4441 4442 void 4443 bpin(struct buf *bp) 4444 { 4445 struct mtx *mtxp; 4446 4447 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4448 mtx_lock(mtxp); 4449 bp->b_pin_count++; 4450 mtx_unlock(mtxp); 4451 } 4452 4453 void 4454 bunpin(struct buf *bp) 4455 { 4456 struct mtx *mtxp; 4457 4458 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4459 mtx_lock(mtxp); 4460 if (--bp->b_pin_count == 0) 4461 wakeup(bp); 4462 mtx_unlock(mtxp); 4463 } 4464 4465 void 4466 bunpin_wait(struct buf *bp) 4467 { 4468 struct mtx *mtxp; 4469 4470 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4471 mtx_lock(mtxp); 4472 while (bp->b_pin_count > 0) 4473 msleep(bp, mtxp, PRIBIO, "bwunpin", 0); 4474 mtx_unlock(mtxp); 4475 } 4476 4477 /* 4478 * Set bio_data or bio_ma for struct bio from the struct buf. 
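 *
 * A typical consumer fills in the rest of the bio before calling
 * this, roughly (sketch only):
 *
 *	bip = g_alloc_bio();
 *	bip->bio_cmd = bp->b_iocmd;
 *	bip->bio_offset = bp->b_iooffset;
 *	bip->bio_length = bp->b_bcount;
 *	bdata2bio(bp, bip);
 *
 * Unmapped-capable providers then consume bio_ma/bio_ma_offset
 * instead of bio_data.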
4479 */ 4480 void 4481 bdata2bio(struct buf *bp, struct bio *bip) 4482 { 4483 4484 if ((bp->b_flags & B_UNMAPPED) != 0) { 4485 KASSERT(unmapped_buf_allowed, ("unmapped")); 4486 bip->bio_ma = bp->b_pages; 4487 bip->bio_ma_n = bp->b_npages; 4488 bip->bio_data = unmapped_buf; 4489 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK; 4490 bip->bio_flags |= BIO_UNMAPPED; 4491 KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) / 4492 PAGE_SIZE == bp->b_npages, 4493 ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset, 4494 (long long)bip->bio_length, bip->bio_ma_n)); 4495 } else { 4496 bip->bio_data = bp->b_data; 4497 bip->bio_ma = NULL; 4498 } 4499 } 4500 4501 #include "opt_ddb.h" 4502 #ifdef DDB 4503 #include <ddb/ddb.h> 4504 4505 /* DDB command to show buffer data */ 4506 DB_SHOW_COMMAND(buffer, db_show_buffer) 4507 { 4508 /* get args */ 4509 struct buf *bp = (struct buf *)addr; 4510 4511 if (!have_addr) { 4512 db_printf("usage: show buffer <addr>\n"); 4513 return; 4514 } 4515 4516 db_printf("buf at %p\n", bp); 4517 db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n", 4518 (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags, 4519 PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS); 4520 db_printf( 4521 "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n" 4522 "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, " 4523 "b_dep = %p\n", 4524 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 4525 bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno, 4526 (intmax_t)bp->b_lblkno, bp->b_dep.lh_first); 4527 if (bp->b_npages) { 4528 int i; 4529 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 4530 for (i = 0; i < bp->b_npages; i++) { 4531 vm_page_t m; 4532 m = bp->b_pages[i]; 4533 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 4534 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 4535 if ((i + 1) < bp->b_npages) 4536 db_printf(","); 4537 } 4538 db_printf("\n"); 4539 } 4540 db_printf(" "); 4541 BUF_LOCKPRINTINFO(bp); 4542 } 4543 4544 DB_SHOW_COMMAND(lockedbufs, lockedbufs) 4545 { 4546 struct buf *bp; 4547 int i; 4548 4549 for (i = 0; i < nbuf; i++) { 4550 bp = &buf[i]; 4551 if (BUF_ISLOCKED(bp)) { 4552 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4553 db_printf("\n"); 4554 } 4555 } 4556 } 4557 4558 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs) 4559 { 4560 struct vnode *vp; 4561 struct buf *bp; 4562 4563 if (!have_addr) { 4564 db_printf("usage: show vnodebufs <addr>\n"); 4565 return; 4566 } 4567 vp = (struct vnode *)addr; 4568 db_printf("Clean buffers:\n"); 4569 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) { 4570 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4571 db_printf("\n"); 4572 } 4573 db_printf("Dirty buffers:\n"); 4574 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) { 4575 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4576 db_printf("\n"); 4577 } 4578 } 4579 4580 DB_COMMAND(countfreebufs, db_coundfreebufs) 4581 { 4582 struct buf *bp; 4583 int i, used = 0, nfree = 0; 4584 4585 if (have_addr) { 4586 db_printf("usage: countfreebufs\n"); 4587 return; 4588 } 4589 4590 for (i = 0; i < nbuf; i++) { 4591 bp = &buf[i]; 4592 if ((bp->b_flags & B_INFREECNT) != 0) 4593 nfree++; 4594 else 4595 used++; 4596 } 4597 4598 db_printf("Counted %d free, %d used (%d tot)\n", nfree, used, 4599 nfree + used); 4600 db_printf("numfreebuffers is %d\n", numfreebuffers); 4601 } 4602 #endif /* DDB */ 4603