1 /*- 2 * Copyright (c) 2004 Poul-Henning Kamp 3 * Copyright (c) 1994,1997 John S. Dyson 4 * Copyright (c) 2013 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Konstantin Belousov 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 /* 33 * this file contains a new buffer I/O scheme implementing a coherent 34 * VM object and buffer cache scheme. Pains have been taken to make 35 * sure that the performance degradation associated with schemes such 36 * as this is not realized. 37 * 38 * Author: John S. Dyson 39 * Significant help during the development and debugging phases 40 * had been provided by David Greenman, also of the FreeBSD core team. 41 * 42 * see man buf(9) for more info. 43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/bio.h> 51 #include <sys/conf.h> 52 #include <sys/buf.h> 53 #include <sys/devicestat.h> 54 #include <sys/eventhandler.h> 55 #include <sys/fail.h> 56 #include <sys/limits.h> 57 #include <sys/lock.h> 58 #include <sys/malloc.h> 59 #include <sys/mount.h> 60 #include <sys/mutex.h> 61 #include <sys/kernel.h> 62 #include <sys/kthread.h> 63 #include <sys/proc.h> 64 #include <sys/resourcevar.h> 65 #include <sys/rwlock.h> 66 #include <sys/sysctl.h> 67 #include <sys/vmmeter.h> 68 #include <sys/vnode.h> 69 #include <geom/geom.h> 70 #include <vm/vm.h> 71 #include <vm/vm_param.h> 72 #include <vm/vm_kern.h> 73 #include <vm/vm_pageout.h> 74 #include <vm/vm_page.h> 75 #include <vm/vm_object.h> 76 #include <vm/vm_extern.h> 77 #include <vm/vm_map.h> 78 #include "opt_compat.h" 79 #include "opt_directio.h" 80 #include "opt_swap.h" 81 82 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer"); 83 84 struct bio_ops bioops; /* I/O operation notification */ 85 86 struct buf_ops buf_ops_bio = { 87 .bop_name = "buf_ops_bio", 88 .bop_write = bufwrite, 89 .bop_strategy = bufstrategy, 90 .bop_sync = bufsync, 91 .bop_bdflush = bufbdflush, 92 }; 93 94 /* 95 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap has 96 * carnal knowledge of buffers. This knowledge should be moved to vfs_bio.c. 
97 */ 98 struct buf *buf; /* buffer header pool */ 99 caddr_t unmapped_buf; 100 101 static struct proc *bufdaemonproc; 102 103 static int inmem(struct vnode *vp, daddr_t blkno); 104 static void vm_hold_free_pages(struct buf *bp, int newbsize); 105 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from, 106 vm_offset_t to); 107 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m); 108 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, 109 vm_page_t m); 110 static void vfs_drain_busy_pages(struct buf *bp); 111 static void vfs_clean_pages_dirty_buf(struct buf *bp); 112 static void vfs_setdirty_locked_object(struct buf *bp); 113 static void vfs_vmio_release(struct buf *bp); 114 static int vfs_bio_clcheck(struct vnode *vp, int size, 115 daddr_t lblkno, daddr_t blkno); 116 static int buf_flush(struct vnode *vp, int); 117 static int flushbufqueues(struct vnode *, int, int); 118 static void buf_daemon(void); 119 static void bremfreel(struct buf *bp); 120 static __inline void bd_wakeup(void); 121 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 122 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 123 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS); 124 #endif 125 126 int vmiodirenable = TRUE; 127 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0, 128 "Use the VM system for directory writes"); 129 long runningbufspace; 130 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0, 131 "Amount of presently outstanding async buffer io"); 132 static long bufspace; 133 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 134 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 135 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD, 136 &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers"); 137 #else 138 SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0, 139 "Virtual memory used for buffers"); 140 #endif 141 static long unmapped_bufspace; 142 SYSCTL_LONG(_vfs, OID_AUTO, unmapped_bufspace, CTLFLAG_RD, 143 &unmapped_bufspace, 0, 144 "Amount of unmapped buffers, inclusive in the bufspace"); 145 static long maxbufspace; 146 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0, 147 "Maximum allowed value of bufspace (including buf_daemon)"); 148 static long bufmallocspace; 149 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0, 150 "Amount of malloced memory for buffers"); 151 static long maxbufmallocspace; 152 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0, 153 "Maximum amount of malloced memory for buffers"); 154 static long lobufspace; 155 SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0, 156 "Minimum amount of buffers we want to have"); 157 long hibufspace; 158 SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0, 159 "Maximum allowed value of bufspace (excluding buf_daemon)"); 160 static int bufreusecnt; 161 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0, 162 "Number of times we have reused a buffer"); 163 static int buffreekvacnt; 164 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0, 165 "Number of times we have freed the KVA space from some buffer"); 166 static int bufdefragcnt; 167 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0, 168 "Number of times we have had to repeat buffer allocation to defragment"); 169 static long lorunningspace; 170 SYSCTL_LONG(_vfs, 
OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0, 171 "Minimum preferred space used for in-progress I/O"); 172 static long hirunningspace; 173 SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0, 174 "Maximum amount of space to use for in-progress I/O"); 175 int dirtybufferflushes; 176 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes, 177 0, "Number of bdwrite to bawrite conversions to limit dirty buffers"); 178 int bdwriteskip; 179 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip, 180 0, "Number of buffers supplied to bdwrite with snapshot deadlock risk"); 181 int altbufferflushes; 182 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes, 183 0, "Number of fsync flushes to limit dirty buffers"); 184 static int recursiveflushes; 185 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes, 186 0, "Number of flushes skipped due to being recursive"); 187 static int numdirtybuffers; 188 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0, 189 "Number of buffers that are dirty (has unwritten changes) at the moment"); 190 static int lodirtybuffers; 191 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0, 192 "How many buffers we want to have free before bufdaemon can sleep"); 193 static int hidirtybuffers; 194 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0, 195 "When the number of dirty buffers is considered severe"); 196 int dirtybufthresh; 197 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh, 198 0, "Number of bdwrite to bawrite conversions to clear dirty buffers"); 199 static int numfreebuffers; 200 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0, 201 "Number of free buffers"); 202 static int lofreebuffers; 203 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0, 204 "XXX Unused"); 205 static int hifreebuffers; 206 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0, 207 "XXX Complicatedly unused"); 208 static int getnewbufcalls; 209 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0, 210 "Number of calls to getnewbuf"); 211 static int getnewbufrestarts; 212 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0, 213 "Number of times getnewbuf has had to restart a buffer acquisition"); 214 static int mappingrestarts; 215 SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0, 216 "Number of times getblk has had to restart a buffer mapping for " 217 "unmapped buffer"); 218 static int flushbufqtarget = 100; 219 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0, 220 "Amount of work to do in flushbufqueues when helping bufdaemon"); 221 static long notbufdflushes; 222 SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0, 223 "Number of dirty buffer flushes done by the bufdaemon helpers"); 224 static long barrierwrites; 225 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0, 226 "Number of barrier writes"); 227 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD, 228 &unmapped_buf_allowed, 0, 229 "Permit the use of the unmapped i/o"); 230 231 /* 232 * Lock for the non-dirty bufqueues 233 */ 234 static struct mtx_padalign bqclean; 235 236 /* 237 * Lock for the dirty queue. 238 */ 239 static struct mtx_padalign bqdirty; 240 241 /* 242 * This lock synchronizes access to bd_request.
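 * (Sketch of the protocol, for illustration: bd_wakeup() below sets
 * bd_request to 1 and wakeup()s &bd_request while holding bdlock; the buffer
 * daemon is expected to clear bd_request and msleep() on &bd_request under
 * the same lock when it has no work, so a request posted between its check
 * and its sleep cannot be lost.)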
243 */ 244 static struct mtx_padalign bdlock; 245 246 /* 247 * This lock protects the runningbufreq and synchronizes runningbufwakeup and 248 * waitrunningbufspace(). 249 */ 250 static struct mtx_padalign rbreqlock; 251 252 /* 253 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it. 254 */ 255 static struct mtx_padalign nblock; 256 257 /* 258 * Lock that protects bdirtywait. 259 */ 260 static struct mtx_padalign bdirtylock; 261 262 /* 263 * Wakeup point for bufdaemon, as well as indicator of whether it is already 264 * active. Set to 1 when the bufdaemon is already "on" the queue, 0 when it 265 * is idling. 266 */ 267 static int bd_request; 268 269 /* 270 * Request for the buf daemon to write more buffers than is indicated by 271 * lodirtybuf. This may be necessary to push out excess dependencies or 272 * defragment the address space where a simple count of the number of dirty 273 * buffers is insufficient to characterize the demand for flushing them. 274 */ 275 static int bd_speedupreq; 276 277 /* 278 * bogus page -- for I/O to/from partially complete buffers 279 * this is a temporary solution to the problem, but it is not 280 * really that bad. it would be better to split the buffer 281 * for input in the case of buffers partially already in memory, 282 * but the code is intricate enough already. 283 */ 284 vm_page_t bogus_page; 285 286 /* 287 * Synchronization (sleep/wakeup) variable for active buffer space requests. 288 * Set when wait starts, cleared prior to wakeup(). 289 * Used in runningbufwakeup() and waitrunningbufspace(). 290 */ 291 static int runningbufreq; 292 293 /* 294 * Synchronization (sleep/wakeup) variable for buffer requests. 295 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done 296 * by and/or. 297 * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(), 298 * getnewbuf(), and getblk(). 299 */ 300 static int needsbuffer; 301 302 /* 303 * Synchronization for bwillwrite() waiters. 304 */ 305 static int bdirtywait; 306 307 /* 308 * Definitions for the buffer free lists. 309 */ 310 #define BUFFER_QUEUES 5 /* number of free buffer queues */ 311 312 #define QUEUE_NONE 0 /* on no queue */ 313 #define QUEUE_CLEAN 1 /* non-B_DELWRI buffers */ 314 #define QUEUE_DIRTY 2 /* B_DELWRI buffers */ 315 #define QUEUE_EMPTYKVA 3 /* empty buffer headers w/KVA assignment */ 316 #define QUEUE_EMPTY 4 /* empty buffer headers */ 317 #define QUEUE_SENTINEL 1024 /* not an queue index, but mark for sentinel */ 318 319 /* Queues for free buffers with various properties */ 320 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } }; 321 #ifdef INVARIANTS 322 static int bq_len[BUFFER_QUEUES]; 323 #endif 324 325 /* 326 * Single global constant for BUF_WMESG, to avoid getting multiple references. 327 * buf_wmesg is referred from macros. 
328 */ 329 const char *buf_wmesg = BUF_WMESG; 330 331 #define VFS_BIO_NEED_ANY 0x01 /* any freeable buffer */ 332 #define VFS_BIO_NEED_FREE 0x04 /* wait for free bufs, hi hysteresis */ 333 #define VFS_BIO_NEED_BUFSPACE 0x08 /* wait for buf space, lo hysteresis */ 334 335 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 336 defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 337 static int 338 sysctl_bufspace(SYSCTL_HANDLER_ARGS) 339 { 340 long lvalue; 341 int ivalue; 342 343 if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long)) 344 return (sysctl_handle_long(oidp, arg1, arg2, req)); 345 lvalue = *(long *)arg1; 346 if (lvalue > INT_MAX) 347 /* On overflow, still write out a long to trigger ENOMEM. */ 348 return (sysctl_handle_long(oidp, &lvalue, 0, req)); 349 ivalue = lvalue; 350 return (sysctl_handle_int(oidp, &ivalue, 0, req)); 351 } 352 #endif 353 354 #ifdef DIRECTIO 355 extern void ffs_rawread_setup(void); 356 #endif /* DIRECTIO */ 357 358 /* 359 * bqlock: 360 * 361 * Return the appropriate queue lock based on the index. 362 */ 363 static inline struct mtx * 364 bqlock(int qindex) 365 { 366 367 if (qindex == QUEUE_DIRTY) 368 return (struct mtx *)(&bqdirty); 369 return (struct mtx *)(&bqclean); 370 } 371 372 /* 373 * bdirtywakeup: 374 * 375 * Wakeup any bwillwrite() waiters. 376 */ 377 static void 378 bdirtywakeup(void) 379 { 380 mtx_lock(&bdirtylock); 381 if (bdirtywait) { 382 bdirtywait = 0; 383 wakeup(&bdirtywait); 384 } 385 mtx_unlock(&bdirtylock); 386 } 387 388 /* 389 * bdirtysub: 390 * 391 * Decrement the numdirtybuffers count by one and wakeup any 392 * threads blocked in bwillwrite(). 393 */ 394 static void 395 bdirtysub(void) 396 { 397 398 if (atomic_fetchadd_int(&numdirtybuffers, -1) == 399 (lodirtybuffers + hidirtybuffers) / 2) 400 bdirtywakeup(); 401 } 402 403 /* 404 * bdirtyadd: 405 * 406 * Increment the numdirtybuffers count by one and wakeup the buf 407 * daemon if needed. 408 */ 409 static void 410 bdirtyadd(void) 411 { 412 413 /* 414 * Only do the wakeup once as we cross the boundary. The 415 * buf daemon will keep running until the condition clears. 416 */ 417 if (atomic_fetchadd_int(&numdirtybuffers, 1) == 418 (lodirtybuffers + hidirtybuffers) / 2) 419 bd_wakeup(); 420 } 421 422 /* 423 * bufspacewakeup: 424 * 425 * Called when buffer space is potentially available for recovery. 426 * getnewbuf() will block on this flag when it is unable to free 427 * sufficient buffer space. Buffer space becomes recoverable when 428 * bp's get placed back in the queues. 429 */ 430 431 static __inline void 432 bufspacewakeup(void) 433 { 434 435 /* 436 * If someone is waiting for BUF space, wake them up. Even 437 * though we haven't freed the kva space yet, the waiting 438 * process will be able to now. 439 */ 440 mtx_lock(&nblock); 441 if (needsbuffer & VFS_BIO_NEED_BUFSPACE) { 442 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE; 443 wakeup(&needsbuffer); 444 } 445 mtx_unlock(&nblock); 446 } 447 448 /* 449 * runningwakeup: 450 * 451 * Wake up processes that are waiting on asynchronous writes to fall 452 * below lorunningspace. 453 */ 454 static void 455 runningwakeup(void) 456 { 457 458 mtx_lock(&rbreqlock); 459 if (runningbufreq) { 460 runningbufreq = 0; 461 wakeup(&runningbufreq); 462 } 463 mtx_unlock(&rbreqlock); 464 } 465 466 /* 467 * runningbufwakeup: 468 * 469 * Decrement the outstanding write count according. 
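 * A small worked example of the transition test below (figures illustrative
 * only): with lorunningspace at 1 MB, a completion that drops
 * runningbufspace from 1.5 MB to 0.9 MB crosses the threshold and calls
 * runningwakeup(); a drop from 3 MB to 2 MB stays above it, and a drop from
 * 0.8 MB to 0.2 MB was already below it, so neither acquires rbreqlock nor
 * issues a wakeup.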
470 */ 471 void 472 runningbufwakeup(struct buf *bp) 473 { 474 long space, bspace; 475 476 if (bp->b_runningbufspace == 0) 477 return; 478 space = atomic_fetchadd_long(&runningbufspace, -bp->b_runningbufspace); 479 bspace = bp->b_runningbufspace; 480 bp->b_runningbufspace = 0; 481 /* 482 * Only acquire the lock and wakeup on the transition from exceeding 483 * the threshold to falling below it. 484 */ 485 if (space < lorunningspace) 486 return; 487 if (space - bspace > lorunningspace) 488 return; 489 runningwakeup(); 490 } 491 492 /* 493 * bufcountadd: 494 * 495 * Called when a buffer has been added to one of the free queues to 496 * account for the buffer and to wakeup anyone waiting for free buffers. 497 * This typically occurs when large amounts of metadata are being handled 498 * by the buffer cache ( else buffer space runs out first, usually ). 499 */ 500 static __inline void 501 bufcountadd(struct buf *bp) 502 { 503 int old; 504 505 KASSERT((bp->b_flags & B_INFREECNT) == 0, 506 ("buf %p already counted as free", bp)); 507 bp->b_flags |= B_INFREECNT; 508 old = atomic_fetchadd_int(&numfreebuffers, 1); 509 KASSERT(old >= 0 && old < nbuf, 510 ("numfreebuffers climbed to %d", old + 1)); 511 mtx_lock(&nblock); 512 if (needsbuffer) { 513 needsbuffer &= ~VFS_BIO_NEED_ANY; 514 if (numfreebuffers >= hifreebuffers) 515 needsbuffer &= ~VFS_BIO_NEED_FREE; 516 wakeup(&needsbuffer); 517 } 518 mtx_unlock(&nblock); 519 } 520 521 /* 522 * bufcountsub: 523 * 524 * Decrement the numfreebuffers count as needed. 525 */ 526 static void 527 bufcountsub(struct buf *bp) 528 { 529 int old; 530 531 /* 532 * Fixup numfreebuffers count. If the buffer is invalid or not 533 * delayed-write, the buffer was free and we must decrement 534 * numfreebuffers. 535 */ 536 if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) { 537 KASSERT((bp->b_flags & B_INFREECNT) != 0, 538 ("buf %p not counted in numfreebuffers", bp)); 539 bp->b_flags &= ~B_INFREECNT; 540 old = atomic_fetchadd_int(&numfreebuffers, -1); 541 KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1)); 542 } 543 } 544 545 /* 546 * waitrunningbufspace() 547 * 548 * runningbufspace is a measure of the amount of I/O currently 549 * running. This routine is used in async-write situations to 550 * prevent creating huge backups of pending writes to a device. 551 * Only asynchronous writes are governed by this function. 552 * 553 * This does NOT turn an async write into a sync write. It waits 554 * for earlier writes to complete and generally returns before the 555 * caller's write has reached the device. 556 */ 557 void 558 waitrunningbufspace(void) 559 { 560 561 mtx_lock(&rbreqlock); 562 while (runningbufspace > hirunningspace) { 563 ++runningbufreq; 564 msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0); 565 } 566 mtx_unlock(&rbreqlock); 567 } 568 569 570 /* 571 * vfs_buf_test_cache: 572 * 573 * Called when a buffer is extended. This function clears the B_CACHE 574 * bit if the newly extended portion of the buffer does not contain 575 * valid data. 
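 * For example (illustrative values, 4 KiB pages assumed): with foff =
 * 0x1a200 and off = 0x600, base = (0x1a200 + 0x600) & PAGE_MASK = 0x800, so
 * the code below asks vm_page_is_valid(m, 0x800, size) and clears B_CACHE
 * when that range is not entirely valid.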
576 */ 577 static __inline 578 void 579 vfs_buf_test_cache(struct buf *bp, 580 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 581 vm_page_t m) 582 { 583 584 VM_OBJECT_ASSERT_WLOCKED(m->object); 585 if (bp->b_flags & B_CACHE) { 586 int base = (foff + off) & PAGE_MASK; 587 if (vm_page_is_valid(m, base, size) == 0) 588 bp->b_flags &= ~B_CACHE; 589 } 590 } 591 592 /* Wake up the buffer daemon if necessary */ 593 static __inline void 594 bd_wakeup(void) 595 { 596 597 mtx_lock(&bdlock); 598 if (bd_request == 0) { 599 bd_request = 1; 600 wakeup(&bd_request); 601 } 602 mtx_unlock(&bdlock); 603 } 604 605 /* 606 * bd_speedup - speedup the buffer cache flushing code 607 */ 608 void 609 bd_speedup(void) 610 { 611 int needwake; 612 613 mtx_lock(&bdlock); 614 needwake = 0; 615 if (bd_speedupreq == 0 || bd_request == 0) 616 needwake = 1; 617 bd_speedupreq = 1; 618 bd_request = 1; 619 if (needwake) 620 wakeup(&bd_request); 621 mtx_unlock(&bdlock); 622 } 623 624 #ifdef __i386__ 625 #define TRANSIENT_DENOM 5 626 #else 627 #define TRANSIENT_DENOM 10 628 #endif 629 630 /* 631 * Calculating buffer cache scaling values and reserve space for buffer 632 * headers. This is called during low level kernel initialization and 633 * may be called more than once. We CANNOT write to the memory area 634 * being reserved at this time. 635 */ 636 caddr_t 637 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est) 638 { 639 int tuned_nbuf; 640 long maxbuf, maxbuf_sz, buf_sz, biotmap_sz; 641 642 /* 643 * physmem_est is in pages. Convert it to kilobytes (assumes 644 * PAGE_SIZE is >= 1K) 645 */ 646 physmem_est = physmem_est * (PAGE_SIZE / 1024); 647 648 /* 649 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE. 650 * For the first 64MB of ram nominally allocate sufficient buffers to 651 * cover 1/4 of our ram. Beyond the first 64MB allocate additional 652 * buffers to cover 1/10 of our ram over 64MB. When auto-sizing 653 * the buffer cache we limit the eventual kva reservation to 654 * maxbcache bytes. 655 * 656 * factor represents the 1/4 x ram conversion. 657 */ 658 if (nbuf == 0) { 659 int factor = 4 * BKVASIZE / 1024; 660 661 nbuf = 50; 662 if (physmem_est > 4096) 663 nbuf += min((physmem_est - 4096) / factor, 664 65536 / factor); 665 if (physmem_est > 65536) 666 nbuf += min((physmem_est - 65536) * 2 / (factor * 5), 667 32 * 1024 * 1024 / (factor * 5)); 668 669 if (maxbcache && nbuf > maxbcache / BKVASIZE) 670 nbuf = maxbcache / BKVASIZE; 671 tuned_nbuf = 1; 672 } else 673 tuned_nbuf = 0; 674 675 /* XXX Avoid unsigned long overflows later on with maxbufspace. */ 676 maxbuf = (LONG_MAX / 3) / BKVASIZE; 677 if (nbuf > maxbuf) { 678 if (!tuned_nbuf) 679 printf("Warning: nbufs lowered from %d to %ld\n", nbuf, 680 maxbuf); 681 nbuf = maxbuf; 682 } 683 684 /* 685 * Ideal allocation size for the transient bio submap is 10% 686 * of the maximal space buffer map. This roughly corresponds 687 * to the amount of the buffer mapped for typical UFS load. 688 * 689 * Clip the buffer map to reserve space for the transient 690 * BIOs, if its extent is bigger than 90% (80% on i386) of the 691 * maximum buffer map extent on the platform. 692 * 693 * The fall-back to the maxbuf in case of maxbcache unset, 694 * allows to not trim the buffer KVA for the architectures 695 * with ample KVA space. 696 */
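	/*
	 * Worked example with illustrative figures (assuming the common
	 * BKVASIZE of 16 KiB and MAXPHYS of 128 KiB): for physmem_est of
	 * 1 GB (1048576 KB), factor is 64, so the auto-tuned nbuf above is
	 * 50 + 1024 + 6144 = 7218 and buf_sz below is about 113 MiB of KVA.
	 * If that is well under the buffer map limit, the transient map is
	 * given the leftover KVA; otherwise roughly 1/10 of buf_sz (about
	 * 11 MiB here) is carved out, yielding a bio_transient_maxcnt of
	 * about 90 before the 1024 cap applies.
	 */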
697 if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) { 698 maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE; 699 buf_sz = (long)nbuf * BKVASIZE; 700 if (buf_sz < maxbuf_sz / TRANSIENT_DENOM * 701 (TRANSIENT_DENOM - 1)) { 702 /* 703 * There is more KVA than memory. Do not 704 * adjust buffer map size, and assign the rest 705 * of maxbuf to transient map. 706 */ 707 biotmap_sz = maxbuf_sz - buf_sz; 708 } else { 709 /* 710 * Buffer map spans all KVA we could afford on 711 * this platform. Give 10% (20% on i386) of 712 * the buffer map to the transient bio map. 713 */ 714 biotmap_sz = buf_sz / TRANSIENT_DENOM; 715 buf_sz -= biotmap_sz; 716 } 717 if (biotmap_sz / INT_MAX > MAXPHYS) 718 bio_transient_maxcnt = INT_MAX; 719 else 720 bio_transient_maxcnt = biotmap_sz / MAXPHYS; 721 /* 722 * Artificially limit to 1024 simultaneous in-flight I/Os 723 * using the transient mapping. 724 */ 725 if (bio_transient_maxcnt > 1024) 726 bio_transient_maxcnt = 1024; 727 if (tuned_nbuf) 728 nbuf = buf_sz / BKVASIZE; 729 } 730 731 /* 732 * swbufs are used as temporary holders for I/O, such as paging I/O. 733 * We have no less than 16 and no more than 256. 734 */ 735 nswbuf = max(min(nbuf/4, 256), 16); 736 #ifdef NSWBUF_MIN 737 if (nswbuf < NSWBUF_MIN) 738 nswbuf = NSWBUF_MIN; 739 #endif 740 #ifdef DIRECTIO 741 ffs_rawread_setup(); 742 #endif 743 744 /* 745 * Reserve space for the buffer cache buffers 746 */ 747 swbuf = (void *)v; 748 v = (caddr_t)(swbuf + nswbuf); 749 buf = (void *)v; 750 v = (caddr_t)(buf + nbuf); 751 752 return(v); 753 } 754 755 /* Initialize the buffer subsystem. Called before use of any buffers. */ 756 void 757 bufinit(void) 758 { 759 struct buf *bp; 760 int i; 761 762 mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF); 763 mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF); 764 mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF); 765 mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF); 766 mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF); 767 mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF); 768 769 /* next, make a null set of free lists */ 770 for (i = 0; i < BUFFER_QUEUES; i++) 771 TAILQ_INIT(&bufqueues[i]); 772 773 /* finally, initialize each buffer header and stick on empty q */ 774 for (i = 0; i < nbuf; i++) { 775 bp = &buf[i]; 776 bzero(bp, sizeof *bp); 777 bp->b_flags = B_INVAL | B_INFREECNT; 778 bp->b_rcred = NOCRED; 779 bp->b_wcred = NOCRED; 780 bp->b_qindex = QUEUE_EMPTY; 781 bp->b_xflags = 0; 782 LIST_INIT(&bp->b_dep); 783 BUF_LOCKINIT(bp); 784 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 785 #ifdef INVARIANTS 786 bq_len[QUEUE_EMPTY]++; 787 #endif 788 } 789 790 /* 791 * maxbufspace is the absolute maximum amount of buffer space we are 792 * allowed to reserve in KVM and in real terms. The absolute maximum 793 * is nominally used by buf_daemon. hibufspace is the nominal maximum 794 * used by most other processes. The differential is required to 795 * ensure that buf_daemon is able to run when other processes might 796 * be blocked waiting for buffer space. 797 * 798 * maxbufspace is based on BKVASIZE. Allocating buffers larger than 799 * this may result in KVM fragmentation which is not handled optimally 800 * by the system. 801 */ 802 maxbufspace = (long)nbuf * BKVASIZE; 803 hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10); 804 lobufspace = hibufspace - MAXBSIZE; 805 806 /* 807 * Note: The 16 MiB upper limit for hirunningspace was chosen 808 * arbitrarily and may need further tuning.
It corresponds to 809 * 128 outstanding write IO requests (if IO size is 128 KiB), 810 * which fits with many RAID controllers' tagged queuing limits. 811 * The lower 1 MiB limit is the historical upper limit for 812 * hirunningspace. 813 */ 814 hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBSIZE), 815 16 * 1024 * 1024), 1024 * 1024); 816 lorunningspace = roundup((hirunningspace * 2) / 3, MAXBSIZE); 817 818 /* 819 * Limit the amount of malloc memory since it is wired permanently into 820 * the kernel space. Even though this is accounted for in the buffer 821 * allocation, we don't want the malloced region to grow uncontrolled. 822 * The malloc scheme improves memory utilization significantly on average 823 * (small) directories. 824 */ 825 maxbufmallocspace = hibufspace / 20; 826 827 /* 828 * Reduce the chance of a deadlock occuring by limiting the number 829 * of delayed-write dirty buffers we allow to stack up. 830 */ 831 hidirtybuffers = nbuf / 4 + 20; 832 dirtybufthresh = hidirtybuffers * 9 / 10; 833 numdirtybuffers = 0; 834 /* 835 * To support extreme low-memory systems, make sure hidirtybuffers cannot 836 * eat up all available buffer space. This occurs when our minimum cannot 837 * be met. We try to size hidirtybuffers to 3/4 our buffer space assuming 838 * BKVASIZE'd buffers. 839 */ 840 while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) { 841 hidirtybuffers >>= 1; 842 } 843 lodirtybuffers = hidirtybuffers / 2; 844 845 /* 846 * Try to keep the number of free buffers in the specified range, 847 * and give special processes (e.g. like buf_daemon) access to an 848 * emergency reserve. 849 */ 850 lofreebuffers = nbuf / 18 + 5; 851 hifreebuffers = 2 * lofreebuffers; 852 numfreebuffers = nbuf; 853 854 bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | 855 VM_ALLOC_NORMAL | VM_ALLOC_WIRED); 856 unmapped_buf = (caddr_t)kmem_alloc_nofault(kernel_map, MAXPHYS); 857 } 858 859 #ifdef INVARIANTS 860 static inline void 861 vfs_buf_check_mapped(struct buf *bp) 862 { 863 864 KASSERT((bp->b_flags & B_UNMAPPED) == 0, 865 ("mapped buf %p %x", bp, bp->b_flags)); 866 KASSERT(bp->b_kvabase != unmapped_buf, 867 ("mapped buf: b_kvabase was not updated %p", bp)); 868 KASSERT(bp->b_data != unmapped_buf, 869 ("mapped buf: b_data was not updated %p", bp)); 870 } 871 872 static inline void 873 vfs_buf_check_unmapped(struct buf *bp) 874 { 875 876 KASSERT((bp->b_flags & B_UNMAPPED) == B_UNMAPPED, 877 ("unmapped buf %p %x", bp, bp->b_flags)); 878 KASSERT(bp->b_kvabase == unmapped_buf, 879 ("unmapped buf: corrupted b_kvabase %p", bp)); 880 KASSERT(bp->b_data == unmapped_buf, 881 ("unmapped buf: corrupted b_data %p", bp)); 882 } 883 884 #define BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp) 885 #define BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp) 886 #else 887 #define BUF_CHECK_MAPPED(bp) do {} while (0) 888 #define BUF_CHECK_UNMAPPED(bp) do {} while (0) 889 #endif 890 891 static void 892 bpmap_qenter(struct buf *bp) 893 { 894 895 BUF_CHECK_MAPPED(bp); 896 897 /* 898 * bp->b_data is relative to bp->b_offset, but 899 * bp->b_offset may be offset into the first page. 900 */ 901 bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data); 902 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages); 903 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 904 (vm_offset_t)(bp->b_offset & PAGE_MASK)); 905 } 906 907 /* 908 * bfreekva() - free the kva allocation for a buffer. 909 * 910 * Since this call frees up buffer space, we call bufspacewakeup(). 
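 * (Summary of the cases below: bufspace is always debited by b_kvasize; the
 * mapped case then unmaps b_kvabase from buffer_map, while the unmapped case
 * unmaps b_kvaalloc only when B_KVAALLOC shows KVA was actually reserved and
 * also credits back unmapped_bufspace.)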
911 */ 912 static void 913 bfreekva(struct buf *bp) 914 { 915 916 if (bp->b_kvasize == 0) 917 return; 918 919 atomic_add_int(&buffreekvacnt, 1); 920 atomic_subtract_long(&bufspace, bp->b_kvasize); 921 if ((bp->b_flags & B_UNMAPPED) == 0) { 922 BUF_CHECK_MAPPED(bp); 923 vm_map_remove(buffer_map, (vm_offset_t)bp->b_kvabase, 924 (vm_offset_t)bp->b_kvabase + bp->b_kvasize); 925 } else { 926 BUF_CHECK_UNMAPPED(bp); 927 if ((bp->b_flags & B_KVAALLOC) != 0) { 928 vm_map_remove(buffer_map, (vm_offset_t)bp->b_kvaalloc, 929 (vm_offset_t)bp->b_kvaalloc + bp->b_kvasize); 930 } 931 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize); 932 bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC); 933 } 934 bp->b_kvasize = 0; 935 bufspacewakeup(); 936 } 937 938 /* 939 * binsfree: 940 * 941 * Insert the buffer into the appropriate free list. 942 */ 943 static void 944 binsfree(struct buf *bp, int qindex) 945 { 946 struct mtx *olock, *nlock; 947 948 BUF_ASSERT_XLOCKED(bp); 949 950 olock = bqlock(bp->b_qindex); 951 nlock = bqlock(qindex); 952 mtx_lock(olock); 953 /* Handle delayed bremfree() processing. */ 954 if (bp->b_flags & B_REMFREE) 955 bremfreel(bp); 956 957 if (bp->b_qindex != QUEUE_NONE) 958 panic("binsfree: free buffer onto another queue???"); 959 960 bp->b_qindex = qindex; 961 if (olock != nlock) { 962 mtx_unlock(olock); 963 mtx_lock(nlock); 964 } 965 if (bp->b_flags & B_AGE) 966 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist); 967 else 968 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist); 969 #ifdef INVARIANTS 970 bq_len[bp->b_qindex]++; 971 #endif 972 mtx_unlock(nlock); 973 974 /* 975 * Something we can maybe free or reuse. 976 */ 977 if (bp->b_bufsize && !(bp->b_flags & B_DELWRI)) 978 bufspacewakeup(); 979 980 if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI)) 981 bufcountadd(bp); 982 } 983 984 /* 985 * bremfree: 986 * 987 * Mark the buffer for removal from the appropriate free list. 988 * 989 */ 990 void 991 bremfree(struct buf *bp) 992 { 993 994 CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 995 KASSERT((bp->b_flags & B_REMFREE) == 0, 996 ("bremfree: buffer %p already marked for delayed removal.", bp)); 997 KASSERT(bp->b_qindex != QUEUE_NONE, 998 ("bremfree: buffer %p not on a queue.", bp)); 999 BUF_ASSERT_XLOCKED(bp); 1000 1001 bp->b_flags |= B_REMFREE; 1002 bufcountsub(bp); 1003 } 1004 1005 /* 1006 * bremfreef: 1007 * 1008 * Force an immediate removal from a free list. Used only in nfs when 1009 * it abuses the b_freelist pointer. 1010 */ 1011 void 1012 bremfreef(struct buf *bp) 1013 { 1014 struct mtx *qlock; 1015 1016 qlock = bqlock(bp->b_qindex); 1017 mtx_lock(qlock); 1018 bremfreel(bp); 1019 mtx_unlock(qlock); 1020 } 1021 1022 /* 1023 * bremfreel: 1024 * 1025 * Removes a buffer from the free list, must be called with the 1026 * correct qlock held. 
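 * A minimal usage sketch, which is exactly what bremfreef() above does:
 *
 *	qlock = bqlock(bp->b_qindex);
 *	mtx_lock(qlock);
 *	bremfreel(bp);
 *	mtx_unlock(qlock);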
1027 */ 1028 static void 1029 bremfreel(struct buf *bp) 1030 { 1031 1032 CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X", 1033 bp, bp->b_vp, bp->b_flags); 1034 KASSERT(bp->b_qindex != QUEUE_NONE, 1035 ("bremfreel: buffer %p not on a queue.", bp)); 1036 BUF_ASSERT_XLOCKED(bp); 1037 mtx_assert(bqlock(bp->b_qindex), MA_OWNED); 1038 1039 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); 1040 #ifdef INVARIANTS 1041 KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow", 1042 bp->b_qindex)); 1043 bq_len[bp->b_qindex]--; 1044 #endif 1045 bp->b_qindex = QUEUE_NONE; 1046 /* 1047 * If this was a delayed bremfree() we only need to remove the buffer 1048 * from the queue and return the stats are already done. 1049 */ 1050 if (bp->b_flags & B_REMFREE) { 1051 bp->b_flags &= ~B_REMFREE; 1052 return; 1053 } 1054 bufcountsub(bp); 1055 } 1056 1057 /* 1058 * Attempt to initiate asynchronous I/O on read-ahead blocks. We must 1059 * clear BIO_ERROR and B_INVAL prior to initiating I/O . If B_CACHE is set, 1060 * the buffer is valid and we do not have to do anything. 1061 */ 1062 void 1063 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize, 1064 int cnt, struct ucred * cred) 1065 { 1066 struct buf *rabp; 1067 int i; 1068 1069 for (i = 0; i < cnt; i++, rablkno++, rabsize++) { 1070 if (inmem(vp, *rablkno)) 1071 continue; 1072 rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0); 1073 1074 if ((rabp->b_flags & B_CACHE) == 0) { 1075 if (!TD_IS_IDLETHREAD(curthread)) 1076 curthread->td_ru.ru_inblock++; 1077 rabp->b_flags |= B_ASYNC; 1078 rabp->b_flags &= ~B_INVAL; 1079 rabp->b_ioflags &= ~BIO_ERROR; 1080 rabp->b_iocmd = BIO_READ; 1081 if (rabp->b_rcred == NOCRED && cred != NOCRED) 1082 rabp->b_rcred = crhold(cred); 1083 vfs_busy_pages(rabp, 0); 1084 BUF_KERNPROC(rabp); 1085 rabp->b_iooffset = dbtob(rabp->b_blkno); 1086 bstrategy(rabp); 1087 } else { 1088 brelse(rabp); 1089 } 1090 } 1091 } 1092 1093 /* 1094 * Entry point for bread() and breadn() via #defines in sys/buf.h. 1095 * 1096 * Get a buffer with the specified data. Look in the cache first. We 1097 * must clear BIO_ERROR and B_INVAL prior to initiating I/O. If B_CACHE 1098 * is set, the buffer is valid and we do not have to do anything, see 1099 * getblk(). Also starts asynchronous I/O on read-ahead blocks. 1100 */ 1101 int 1102 breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno, 1103 int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp) 1104 { 1105 struct buf *bp; 1106 int rv = 0, readwait = 0; 1107 1108 CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size); 1109 /* 1110 * Can only return NULL if GB_LOCK_NOWAIT flag is specified. 1111 */ 1112 *bpp = bp = getblk(vp, blkno, size, 0, 0, flags); 1113 if (bp == NULL) 1114 return (EBUSY); 1115 1116 /* if not found in cache, do some I/O */ 1117 if ((bp->b_flags & B_CACHE) == 0) { 1118 if (!TD_IS_IDLETHREAD(curthread)) 1119 curthread->td_ru.ru_inblock++; 1120 bp->b_iocmd = BIO_READ; 1121 bp->b_flags &= ~B_INVAL; 1122 bp->b_ioflags &= ~BIO_ERROR; 1123 if (bp->b_rcred == NOCRED && cred != NOCRED) 1124 bp->b_rcred = crhold(cred); 1125 vfs_busy_pages(bp, 0); 1126 bp->b_iooffset = dbtob(bp->b_blkno); 1127 bstrategy(bp); 1128 ++readwait; 1129 } 1130 1131 breada(vp, rablkno, rabsize, cnt, cred); 1132 1133 if (readwait) { 1134 rv = bufwait(bp); 1135 } 1136 return (rv); 1137 } 1138 1139 /* 1140 * Write, release buffer on completion. (Done by iodone 1141 * if async). Do not bother writing anything if the buffer 1142 * is invalid. 
1143 * 1144 * Note that we set B_CACHE here, indicating that buffer is 1145 * fully valid and thus cacheable. This is true even of NFS 1146 * now so we set it generally. This could be set either here 1147 * or in biodone() since the I/O is synchronous. We put it 1148 * here. 1149 */ 1150 int 1151 bufwrite(struct buf *bp) 1152 { 1153 int oldflags; 1154 struct vnode *vp; 1155 long space; 1156 int vp_md; 1157 1158 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1159 if (bp->b_flags & B_INVAL) { 1160 brelse(bp); 1161 return (0); 1162 } 1163 1164 if (bp->b_flags & B_BARRIER) 1165 barrierwrites++; 1166 1167 oldflags = bp->b_flags; 1168 1169 BUF_ASSERT_HELD(bp); 1170 1171 if (bp->b_pin_count > 0) 1172 bunpin_wait(bp); 1173 1174 KASSERT(!(bp->b_vflags & BV_BKGRDINPROG), 1175 ("FFS background buffer should not get here %p", bp)); 1176 1177 vp = bp->b_vp; 1178 if (vp) 1179 vp_md = vp->v_vflag & VV_MD; 1180 else 1181 vp_md = 0; 1182 1183 /* 1184 * Mark the buffer clean. Increment the bufobj write count 1185 * before bundirty() call, to prevent other thread from seeing 1186 * empty dirty list and zero counter for writes in progress, 1187 * falsely indicating that the bufobj is clean. 1188 */ 1189 bufobj_wref(bp->b_bufobj); 1190 bundirty(bp); 1191 1192 bp->b_flags &= ~B_DONE; 1193 bp->b_ioflags &= ~BIO_ERROR; 1194 bp->b_flags |= B_CACHE; 1195 bp->b_iocmd = BIO_WRITE; 1196 1197 vfs_busy_pages(bp, 1); 1198 1199 /* 1200 * Normal bwrites pipeline writes 1201 */ 1202 bp->b_runningbufspace = bp->b_bufsize; 1203 space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace); 1204 1205 if (!TD_IS_IDLETHREAD(curthread)) 1206 curthread->td_ru.ru_oublock++; 1207 if (oldflags & B_ASYNC) 1208 BUF_KERNPROC(bp); 1209 bp->b_iooffset = dbtob(bp->b_blkno); 1210 bstrategy(bp); 1211 1212 if ((oldflags & B_ASYNC) == 0) { 1213 int rtval = bufwait(bp); 1214 brelse(bp); 1215 return (rtval); 1216 } else if (space > hirunningspace) { 1217 /* 1218 * don't allow the async write to saturate the I/O 1219 * system. We will not deadlock here because 1220 * we are blocking waiting for I/O that is already in-progress 1221 * to complete. We do not block here if it is the update 1222 * or syncer daemon trying to clean up as that can lead 1223 * to deadlock. 1224 */ 1225 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md) 1226 waitrunningbufspace(); 1227 } 1228 1229 return (0); 1230 } 1231 1232 void 1233 bufbdflush(struct bufobj *bo, struct buf *bp) 1234 { 1235 struct buf *nbp; 1236 1237 if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) { 1238 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread); 1239 altbufferflushes++; 1240 } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) { 1241 BO_LOCK(bo); 1242 /* 1243 * Try to find a buffer to flush. 1244 */ 1245 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) { 1246 if ((nbp->b_vflags & BV_BKGRDINPROG) || 1247 BUF_LOCK(nbp, 1248 LK_EXCLUSIVE | LK_NOWAIT, NULL)) 1249 continue; 1250 if (bp == nbp) 1251 panic("bdwrite: found ourselves"); 1252 BO_UNLOCK(bo); 1253 /* Don't countdeps with the bo lock held. */ 1254 if (buf_countdeps(nbp, 0)) { 1255 BO_LOCK(bo); 1256 BUF_UNLOCK(nbp); 1257 continue; 1258 } 1259 if (nbp->b_flags & B_CLUSTEROK) { 1260 vfs_bio_awrite(nbp); 1261 } else { 1262 bremfree(nbp); 1263 bawrite(nbp); 1264 } 1265 dirtybufferflushes++; 1266 break; 1267 } 1268 if (nbp == NULL) 1269 BO_UNLOCK(bo); 1270 } 1271 } 1272 1273 /* 1274 * Delayed write. (Buffer is marked dirty). Do not bother writing 1275 * anything if the buffer is marked invalid. 
1276 * 1277 * Note that since the buffer must be completely valid, we can safely 1278 * set B_CACHE. In fact, we have to set B_CACHE here rather then in 1279 * biodone() in order to prevent getblk from writing the buffer 1280 * out synchronously. 1281 */ 1282 void 1283 bdwrite(struct buf *bp) 1284 { 1285 struct thread *td = curthread; 1286 struct vnode *vp; 1287 struct bufobj *bo; 1288 1289 CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1290 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1291 KASSERT((bp->b_flags & B_BARRIER) == 0, 1292 ("Barrier request in delayed write %p", bp)); 1293 BUF_ASSERT_HELD(bp); 1294 1295 if (bp->b_flags & B_INVAL) { 1296 brelse(bp); 1297 return; 1298 } 1299 1300 /* 1301 * If we have too many dirty buffers, don't create any more. 1302 * If we are wildly over our limit, then force a complete 1303 * cleanup. Otherwise, just keep the situation from getting 1304 * out of control. Note that we have to avoid a recursive 1305 * disaster and not try to clean up after our own cleanup! 1306 */ 1307 vp = bp->b_vp; 1308 bo = bp->b_bufobj; 1309 if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) { 1310 td->td_pflags |= TDP_INBDFLUSH; 1311 BO_BDFLUSH(bo, bp); 1312 td->td_pflags &= ~TDP_INBDFLUSH; 1313 } else 1314 recursiveflushes++; 1315 1316 bdirty(bp); 1317 /* 1318 * Set B_CACHE, indicating that the buffer is fully valid. This is 1319 * true even of NFS now. 1320 */ 1321 bp->b_flags |= B_CACHE; 1322 1323 /* 1324 * This bmap keeps the system from needing to do the bmap later, 1325 * perhaps when the system is attempting to do a sync. Since it 1326 * is likely that the indirect block -- or whatever other datastructure 1327 * that the filesystem needs is still in memory now, it is a good 1328 * thing to do this. Note also, that if the pageout daemon is 1329 * requesting a sync -- there might not be enough memory to do 1330 * the bmap then... So, this is important to do. 1331 */ 1332 if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) { 1333 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 1334 } 1335 1336 /* 1337 * Set the *dirty* buffer range based upon the VM system dirty 1338 * pages. 1339 * 1340 * Mark the buffer pages as clean. We need to do this here to 1341 * satisfy the vnode_pager and the pageout daemon, so that it 1342 * thinks that the pages have been "cleaned". Note that since 1343 * the pages are in a delayed write buffer -- the VFS layer 1344 * "will" see that the pages get written out on the next sync, 1345 * or perhaps the cluster will be completed. 1346 */ 1347 vfs_clean_pages_dirty_buf(bp); 1348 bqrelse(bp); 1349 1350 /* 1351 * note: we cannot initiate I/O from a bdwrite even if we wanted to, 1352 * due to the softdep code. 1353 */ 1354 } 1355 1356 /* 1357 * bdirty: 1358 * 1359 * Turn buffer into delayed write request. We must clear BIO_READ and 1360 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to 1361 * itself to properly update it in the dirty/clean lists. We mark it 1362 * B_DONE to ensure that any asynchronization of the buffer properly 1363 * clears B_DONE ( else a panic will occur later ). 1364 * 1365 * bdirty() is kinda like bdwrite() - we have to clear B_INVAL which 1366 * might have been set pre-getblk(). Unlike bwrite/bdwrite, bdirty() 1367 * should only be called if the buffer is known-good. 1368 * 1369 * Since the buffer is not on a queue, we do not update the numfreebuffers 1370 * count. 1371 * 1372 * The buffer must be on QUEUE_NONE. 
1373 */ 1374 void 1375 bdirty(struct buf *bp) 1376 { 1377 1378 CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X", 1379 bp, bp->b_vp, bp->b_flags); 1380 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1381 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE, 1382 ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); 1383 BUF_ASSERT_HELD(bp); 1384 bp->b_flags &= ~(B_RELBUF); 1385 bp->b_iocmd = BIO_WRITE; 1386 1387 if ((bp->b_flags & B_DELWRI) == 0) { 1388 bp->b_flags |= /* XXX B_DONE | */ B_DELWRI; 1389 reassignbuf(bp); 1390 bdirtyadd(); 1391 } 1392 } 1393 1394 /* 1395 * bundirty: 1396 * 1397 * Clear B_DELWRI for buffer. 1398 * 1399 * Since the buffer is not on a queue, we do not update the numfreebuffers 1400 * count. 1401 * 1402 * The buffer must be on QUEUE_NONE. 1403 */ 1404 1405 void 1406 bundirty(struct buf *bp) 1407 { 1408 1409 CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1410 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1411 KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE, 1412 ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex)); 1413 BUF_ASSERT_HELD(bp); 1414 1415 if (bp->b_flags & B_DELWRI) { 1416 bp->b_flags &= ~B_DELWRI; 1417 reassignbuf(bp); 1418 bdirtysub(); 1419 } 1420 /* 1421 * Since it is now being written, we can clear its deferred write flag. 1422 */ 1423 bp->b_flags &= ~B_DEFERRED; 1424 } 1425 1426 /* 1427 * bawrite: 1428 * 1429 * Asynchronous write. Start output on a buffer, but do not wait for 1430 * it to complete. The buffer is released when the output completes. 1431 * 1432 * bwrite() ( or the VOP routine anyway ) is responsible for handling 1433 * B_INVAL buffers. Not us. 1434 */ 1435 void 1436 bawrite(struct buf *bp) 1437 { 1438 1439 bp->b_flags |= B_ASYNC; 1440 (void) bwrite(bp); 1441 } 1442 1443 /* 1444 * babarrierwrite: 1445 * 1446 * Asynchronous barrier write. Start output on a buffer, but do not 1447 * wait for it to complete. Place a write barrier after this write so 1448 * that this buffer and all buffers written before it are committed to 1449 * the disk before any buffers written after this write are committed 1450 * to the disk. The buffer is released when the output completes. 1451 */ 1452 void 1453 babarrierwrite(struct buf *bp) 1454 { 1455 1456 bp->b_flags |= B_ASYNC | B_BARRIER; 1457 (void) bwrite(bp); 1458 } 1459 1460 /* 1461 * bbarrierwrite: 1462 * 1463 * Synchronous barrier write. Start output on a buffer and wait for 1464 * it to complete. Place a write barrier after this write so that 1465 * this buffer and all buffers written before it are committed to 1466 * the disk before any buffers written after this write are committed 1467 * to the disk. The buffer is released when the output completes. 1468 */ 1469 int 1470 bbarrierwrite(struct buf *bp) 1471 { 1472 1473 bp->b_flags |= B_BARRIER; 1474 return (bwrite(bp)); 1475 } 1476 1477 /* 1478 * bwillwrite: 1479 * 1480 * Called prior to the locking of any vnodes when we are expecting to 1481 * write. We do not want to starve the buffer cache with too many 1482 * dirty buffers so we block here. By blocking prior to the locking 1483 * of any vnodes we attempt to avoid the situation where a locked vnode 1484 * prevents the various system daemons from flushing related buffers. 
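 * (Typical use, for illustration: a write path calls bwillwrite() before it
 * vn_lock()s the vnode it is about to dirty, so the msleep() below never
 * happens while that vnode lock is held and the flushing daemons stay
 * unblocked.)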
1485 */ 1486 void 1487 bwillwrite(void) 1488 { 1489 1490 if (numdirtybuffers >= hidirtybuffers) { 1491 mtx_lock(&bdirtylock); 1492 while (numdirtybuffers >= hidirtybuffers) { 1493 bdirtywait = 1; 1494 msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4), 1495 "flswai", 0); 1496 } 1497 mtx_unlock(&bdirtylock); 1498 } 1499 } 1500 1501 /* 1502 * Return true if we have too many dirty buffers. 1503 */ 1504 int 1505 buf_dirty_count_severe(void) 1506 { 1507 1508 return(numdirtybuffers >= hidirtybuffers); 1509 } 1510 1511 static __noinline int 1512 buf_vm_page_count_severe(void) 1513 { 1514 1515 KFAIL_POINT_CODE(DEBUG_FP, buf_pressure, return 1); 1516 1517 return vm_page_count_severe(); 1518 } 1519 1520 /* 1521 * brelse: 1522 * 1523 * Release a busy buffer and, if requested, free its resources. The 1524 * buffer will be stashed in the appropriate bufqueue[] allowing it 1525 * to be accessed later as a cache entity or reused for other purposes. 1526 */ 1527 void 1528 brelse(struct buf *bp) 1529 { 1530 int qindex; 1531 1532 CTR3(KTR_BUF, "brelse(%p) vp %p flags %X", 1533 bp, bp->b_vp, bp->b_flags); 1534 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), 1535 ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1536 1537 if (BUF_LOCKRECURSED(bp)) { 1538 /* 1539 * Do not process, in particular, do not handle the 1540 * B_INVAL/B_RELBUF and do not release to free list. 1541 */ 1542 BUF_UNLOCK(bp); 1543 return; 1544 } 1545 1546 if (bp->b_flags & B_MANAGED) { 1547 bqrelse(bp); 1548 return; 1549 } 1550 1551 if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) && 1552 bp->b_error == EIO && !(bp->b_flags & B_INVAL)) { 1553 /* 1554 * Failed write, redirty. Must clear BIO_ERROR to prevent 1555 * pages from being scrapped. If the error is anything 1556 * other than an I/O error (EIO), assume that retrying 1557 * is futile. 1558 */ 1559 bp->b_ioflags &= ~BIO_ERROR; 1560 bdirty(bp); 1561 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) || 1562 (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) { 1563 /* 1564 * Either a failed I/O or we were asked to free or not 1565 * cache the buffer. 1566 */ 1567 bp->b_flags |= B_INVAL; 1568 if (!LIST_EMPTY(&bp->b_dep)) 1569 buf_deallocate(bp); 1570 if (bp->b_flags & B_DELWRI) 1571 bdirtysub(); 1572 bp->b_flags &= ~(B_DELWRI | B_CACHE); 1573 if ((bp->b_flags & B_VMIO) == 0) { 1574 if (bp->b_bufsize) 1575 allocbuf(bp, 0); 1576 if (bp->b_vp) 1577 brelvp(bp); 1578 } 1579 } 1580 1581 /* 1582 * We must clear B_RELBUF if B_DELWRI is set. If vfs_vmio_release() 1583 * is called with B_DELWRI set, the underlying pages may wind up 1584 * getting freed causing a previous write (bdwrite()) to get 'lost' 1585 * because pages associated with a B_DELWRI bp are marked clean. 1586 * 1587 * We still allow the B_INVAL case to call vfs_vmio_release(), even 1588 * if B_DELWRI is set. 1589 * 1590 * If B_DELWRI is not set we may have to set B_RELBUF if we are low 1591 * on pages to return pages to the VM page queues. 1592 */ 1593 if (bp->b_flags & B_DELWRI) 1594 bp->b_flags &= ~B_RELBUF; 1595 else if (buf_vm_page_count_severe()) { 1596 /* 1597 * BKGRDINPROG can only be set with the buf and bufobj 1598 * locks both held. We tolerate a race to clear it here. 1599 */ 1600 if (!(bp->b_vflags & BV_BKGRDINPROG)) 1601 bp->b_flags |= B_RELBUF; 1602 } 1603 1604 /* 1605 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer 1606 * constituted, not even NFS buffers now. Two flags effect this. If 1607 * B_INVAL, the struct buf is invalidated but the VM object is kept 1608 * around ( i.e. 
so it is trivial to reconstitute the buffer later ). 1609 * 1610 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be 1611 * invalidated. BIO_ERROR cannot be set for a failed write unless the 1612 * buffer is also B_INVAL because it hits the re-dirtying code above. 1613 * 1614 * Normally we can do this whether a buffer is B_DELWRI or not. If 1615 * the buffer is an NFS buffer, it is tracking piecemeal writes or 1616 * the commit state and we cannot afford to lose the buffer. If the 1617 * buffer has a background write in progress, we need to keep it 1618 * around to prevent it from being reconstituted and starting a second 1619 * background write. 1620 */ 1621 if ((bp->b_flags & B_VMIO) 1622 && !(bp->b_vp->v_mount != NULL && 1623 (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 && 1624 !vn_isdisk(bp->b_vp, NULL) && 1625 (bp->b_flags & B_DELWRI)) 1626 ) { 1627 1628 int i, j, resid; 1629 vm_page_t m; 1630 off_t foff; 1631 vm_pindex_t poff; 1632 vm_object_t obj; 1633 1634 obj = bp->b_bufobj->bo_object; 1635 1636 /* 1637 * Get the base offset and length of the buffer. Note that 1638 * in the VMIO case if the buffer block size is not 1639 * page-aligned then b_data pointer may not be page-aligned. 1640 * But our b_pages[] array *IS* page aligned. 1641 * 1642 * block sizes less then DEV_BSIZE (usually 512) are not 1643 * supported due to the page granularity bits (m->valid, 1644 * m->dirty, etc...). 1645 * 1646 * See man buf(9) for more information 1647 */ 1648 resid = bp->b_bufsize; 1649 foff = bp->b_offset; 1650 for (i = 0; i < bp->b_npages; i++) { 1651 int had_bogus = 0; 1652 1653 m = bp->b_pages[i]; 1654 1655 /* 1656 * If we hit a bogus page, fixup *all* the bogus pages 1657 * now. 1658 */ 1659 if (m == bogus_page) { 1660 poff = OFF_TO_IDX(bp->b_offset); 1661 had_bogus = 1; 1662 1663 VM_OBJECT_RLOCK(obj); 1664 for (j = i; j < bp->b_npages; j++) { 1665 vm_page_t mtmp; 1666 mtmp = bp->b_pages[j]; 1667 if (mtmp == bogus_page) { 1668 mtmp = vm_page_lookup(obj, poff + j); 1669 if (!mtmp) { 1670 panic("brelse: page missing\n"); 1671 } 1672 bp->b_pages[j] = mtmp; 1673 } 1674 } 1675 VM_OBJECT_RUNLOCK(obj); 1676 1677 if ((bp->b_flags & (B_INVAL | B_UNMAPPED)) == 0) { 1678 BUF_CHECK_MAPPED(bp); 1679 pmap_qenter( 1680 trunc_page((vm_offset_t)bp->b_data), 1681 bp->b_pages, bp->b_npages); 1682 } 1683 m = bp->b_pages[i]; 1684 } 1685 if ((bp->b_flags & B_NOCACHE) || 1686 (bp->b_ioflags & BIO_ERROR && 1687 bp->b_iocmd == BIO_READ)) { 1688 int poffset = foff & PAGE_MASK; 1689 int presid = resid > (PAGE_SIZE - poffset) ? 1690 (PAGE_SIZE - poffset) : resid; 1691 1692 KASSERT(presid >= 0, ("brelse: extra page")); 1693 VM_OBJECT_WLOCK(obj); 1694 vm_page_set_invalid(m, poffset, presid); 1695 VM_OBJECT_WUNLOCK(obj); 1696 if (had_bogus) 1697 printf("avoided corruption bug in bogus_page/brelse code\n"); 1698 } 1699 resid -= PAGE_SIZE - (foff & PAGE_MASK); 1700 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 1701 } 1702 if (bp->b_flags & (B_INVAL | B_RELBUF)) 1703 vfs_vmio_release(bp); 1704 1705 } else if (bp->b_flags & B_VMIO) { 1706 1707 if (bp->b_flags & (B_INVAL | B_RELBUF)) { 1708 vfs_vmio_release(bp); 1709 } 1710 1711 } else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) { 1712 if (bp->b_bufsize != 0) 1713 allocbuf(bp, 0); 1714 if (bp->b_vp != NULL) 1715 brelvp(bp); 1716 } 1717 1718 /* 1719 * If the buffer has junk contents signal it and eventually 1720 * clean up B_DELWRI and diassociate the vnode so that gbincore() 1721 * doesn't find it. 
1722 */ 1723 if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 || 1724 (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0) 1725 bp->b_flags |= B_INVAL; 1726 if (bp->b_flags & B_INVAL) { 1727 if (bp->b_flags & B_DELWRI) 1728 bundirty(bp); 1729 if (bp->b_vp) 1730 brelvp(bp); 1731 } 1732 1733 /* buffers with no memory */ 1734 if (bp->b_bufsize == 0) { 1735 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA); 1736 if (bp->b_vflags & BV_BKGRDINPROG) 1737 panic("losing buffer 1"); 1738 if (bp->b_kvasize) 1739 qindex = QUEUE_EMPTYKVA; 1740 else 1741 qindex = QUEUE_EMPTY; 1742 bp->b_flags |= B_AGE; 1743 /* buffers with junk contents */ 1744 } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) || 1745 (bp->b_ioflags & BIO_ERROR)) { 1746 bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA); 1747 if (bp->b_vflags & BV_BKGRDINPROG) 1748 panic("losing buffer 2"); 1749 qindex = QUEUE_CLEAN; 1750 bp->b_flags |= B_AGE; 1751 /* remaining buffers */ 1752 } else if (bp->b_flags & B_DELWRI) 1753 qindex = QUEUE_DIRTY; 1754 else 1755 qindex = QUEUE_CLEAN; 1756 1757 binsfree(bp, qindex); 1758 1759 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT); 1760 if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY)) 1761 panic("brelse: not dirty"); 1762 /* unlock */ 1763 BUF_UNLOCK(bp); 1764 } 1765 1766 /* 1767 * Release a buffer back to the appropriate queue but do not try to free 1768 * it. The buffer is expected to be used again soon. 1769 * 1770 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by 1771 * biodone() to requeue an async I/O on completion. It is also used when 1772 * known good buffers need to be requeued but we think we may need the data 1773 * again soon. 1774 * 1775 * XXX we should be able to leave the B_RELBUF hint set on completion. 1776 */ 1777 void 1778 bqrelse(struct buf *bp) 1779 { 1780 int qindex; 1781 1782 CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1783 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), 1784 ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1785 1786 if (BUF_LOCKRECURSED(bp)) { 1787 /* do not release to free list */ 1788 BUF_UNLOCK(bp); 1789 return; 1790 } 1791 bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 1792 1793 if (bp->b_flags & B_MANAGED) { 1794 if (bp->b_flags & B_REMFREE) 1795 bremfreef(bp); 1796 goto out; 1797 } 1798 1799 /* buffers with stale but valid contents */ 1800 if (bp->b_flags & B_DELWRI) { 1801 qindex = QUEUE_DIRTY; 1802 } else { 1803 if ((bp->b_flags & B_DELWRI) == 0 && 1804 (bp->b_xflags & BX_VNDIRTY)) 1805 panic("bqrelse: not dirty"); 1806 /* 1807 * BKGRDINPROG can only be set with the buf and bufobj 1808 * locks both held. We tolerate a race to clear it here. 1809 */ 1810 if (buf_vm_page_count_severe() && 1811 (bp->b_vflags & BV_BKGRDINPROG) == 0) { 1812 /* 1813 * We are too low on memory, we have to try to free 1814 * the buffer (most importantly: the wired pages 1815 * making up its backing store) *now*. 
1816 */ 1817 brelse(bp); 1818 return; 1819 } 1820 qindex = QUEUE_CLEAN; 1821 } 1822 binsfree(bp, qindex); 1823 1824 out: 1825 /* unlock */ 1826 BUF_UNLOCK(bp); 1827 } 1828 1829 /* Give pages used by the bp back to the VM system (where possible) */ 1830 static void 1831 vfs_vmio_release(struct buf *bp) 1832 { 1833 int i; 1834 vm_page_t m; 1835 1836 if ((bp->b_flags & B_UNMAPPED) == 0) { 1837 BUF_CHECK_MAPPED(bp); 1838 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages); 1839 } else 1840 BUF_CHECK_UNMAPPED(bp); 1841 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 1842 for (i = 0; i < bp->b_npages; i++) { 1843 m = bp->b_pages[i]; 1844 bp->b_pages[i] = NULL; 1845 /* 1846 * In order to keep page LRU ordering consistent, put 1847 * everything on the inactive queue. 1848 */ 1849 vm_page_lock(m); 1850 vm_page_unwire(m, 0); 1851 /* 1852 * We don't mess with busy pages, it is 1853 * the responsibility of the process that 1854 * busied the pages to deal with them. 1855 */ 1856 if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 && 1857 m->wire_count == 0) { 1858 /* 1859 * Might as well free the page if we can and it has 1860 * no valid data. We also free the page if the 1861 * buffer was used for direct I/O 1862 */ 1863 if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) { 1864 vm_page_free(m); 1865 } else if (bp->b_flags & B_DIRECT) { 1866 vm_page_try_to_free(m); 1867 } else if (buf_vm_page_count_severe()) { 1868 vm_page_try_to_cache(m); 1869 } 1870 } 1871 vm_page_unlock(m); 1872 } 1873 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 1874 1875 if (bp->b_bufsize) { 1876 bufspacewakeup(); 1877 bp->b_bufsize = 0; 1878 } 1879 bp->b_npages = 0; 1880 bp->b_flags &= ~B_VMIO; 1881 if (bp->b_vp) 1882 brelvp(bp); 1883 } 1884 1885 /* 1886 * Check to see if a block at a particular lbn is available for a clustered 1887 * write. 1888 */ 1889 static int 1890 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno) 1891 { 1892 struct buf *bpa; 1893 int match; 1894 1895 match = 0; 1896 1897 /* If the buf isn't in core skip it */ 1898 if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL) 1899 return (0); 1900 1901 /* If the buf is busy we don't want to wait for it */ 1902 if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 1903 return (0); 1904 1905 /* Only cluster with valid clusterable delayed write buffers */ 1906 if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) != 1907 (B_DELWRI | B_CLUSTEROK)) 1908 goto done; 1909 1910 if (bpa->b_bufsize != size) 1911 goto done; 1912 1913 /* 1914 * Check to see if it is in the expected place on disk and that the 1915 * block has been mapped. 1916 */ 1917 if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno)) 1918 match = 1; 1919 done: 1920 BUF_UNLOCK(bpa); 1921 return (match); 1922 } 1923 1924 /* 1925 * vfs_bio_awrite: 1926 * 1927 * Implement clustered async writes for clearing out B_DELWRI buffers. 1928 * This is much better then the old way of writing only one buffer at 1929 * a time. Note that we may not be presented with the buffers in the 1930 * correct order, so we search for the cluster in both directions. 1931 */ 1932 int 1933 vfs_bio_awrite(struct buf *bp) 1934 { 1935 struct bufobj *bo; 1936 int i; 1937 int j; 1938 daddr_t lblkno = bp->b_lblkno; 1939 struct vnode *vp = bp->b_vp; 1940 int ncl; 1941 int nwritten; 1942 int size; 1943 int maxcl; 1944 int gbflags; 1945 1946 bo = &vp->v_bufobj; 1947 gbflags = (bp->b_flags & B_UNMAPPED) != 0 ? GB_UNMAPPED : 0; 1948 /* 1949 * right now we support clustered writing only to regular files. 
If 1950 * we find a clusterable block we could be in the middle of a cluster 1951 * rather then at the beginning. 1952 */ 1953 if ((vp->v_type == VREG) && 1954 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 1955 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 1956 1957 size = vp->v_mount->mnt_stat.f_iosize; 1958 maxcl = MAXPHYS / size; 1959 1960 BO_RLOCK(bo); 1961 for (i = 1; i < maxcl; i++) 1962 if (vfs_bio_clcheck(vp, size, lblkno + i, 1963 bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0) 1964 break; 1965 1966 for (j = 1; i + j <= maxcl && j <= lblkno; j++) 1967 if (vfs_bio_clcheck(vp, size, lblkno - j, 1968 bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0) 1969 break; 1970 BO_RUNLOCK(bo); 1971 --j; 1972 ncl = i + j; 1973 /* 1974 * this is a possible cluster write 1975 */ 1976 if (ncl != 1) { 1977 BUF_UNLOCK(bp); 1978 nwritten = cluster_wbuild(vp, size, lblkno - j, ncl, 1979 gbflags); 1980 return (nwritten); 1981 } 1982 } 1983 bremfree(bp); 1984 bp->b_flags |= B_ASYNC; 1985 /* 1986 * default (old) behavior, writing out only one block 1987 * 1988 * XXX returns b_bufsize instead of b_bcount for nwritten? 1989 */ 1990 nwritten = bp->b_bufsize; 1991 (void) bwrite(bp); 1992 1993 return (nwritten); 1994 } 1995 1996 static void 1997 setbufkva(struct buf *bp, vm_offset_t addr, int maxsize, int gbflags) 1998 { 1999 2000 KASSERT((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 && 2001 bp->b_kvasize == 0, ("call bfreekva(%p)", bp)); 2002 if ((gbflags & GB_UNMAPPED) == 0) { 2003 bp->b_kvabase = (caddr_t)addr; 2004 } else if ((gbflags & GB_KVAALLOC) != 0) { 2005 KASSERT((gbflags & GB_UNMAPPED) != 0, 2006 ("GB_KVAALLOC without GB_UNMAPPED")); 2007 bp->b_kvaalloc = (caddr_t)addr; 2008 bp->b_flags |= B_UNMAPPED | B_KVAALLOC; 2009 atomic_add_long(&unmapped_bufspace, bp->b_kvasize); 2010 } 2011 bp->b_kvasize = maxsize; 2012 } 2013 2014 /* 2015 * Allocate the buffer KVA and set b_kvasize. Also set b_kvabase if 2016 * needed. 2017 */ 2018 static int 2019 allocbufkva(struct buf *bp, int maxsize, int gbflags) 2020 { 2021 vm_offset_t addr; 2022 int rv; 2023 2024 bfreekva(bp); 2025 addr = 0; 2026 2027 vm_map_lock(buffer_map); 2028 if (vm_map_findspace(buffer_map, vm_map_min(buffer_map), maxsize, 2029 &addr)) { 2030 vm_map_unlock(buffer_map); 2031 /* 2032 * Buffer map is too fragmented. Request the caller 2033 * to defragment the map. 2034 */ 2035 atomic_add_int(&bufdefragcnt, 1); 2036 return (1); 2037 } 2038 rv = vm_map_insert(buffer_map, NULL, 0, addr, addr + maxsize, 2039 VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT); 2040 KASSERT(rv == KERN_SUCCESS, ("vm_map_insert(buffer_map) rv %d", rv)); 2041 vm_map_unlock(buffer_map); 2042 setbufkva(bp, addr, maxsize, gbflags); 2043 atomic_add_long(&bufspace, bp->b_kvasize); 2044 return (0); 2045 } 2046 2047 /* 2048 * Ask the bufdaemon for help, or act as bufdaemon itself, when a 2049 * locked vnode is supplied. 
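 *
 * In outline: the helper raises the appropriate VFS_BIO_NEED_* flag in
 * needsbuffer, pokes the buf daemon with bd_speedup(), and, unless
 * GB_NOWAIT_BD was passed, sleeps until the shortage clears.  A caller
 * that holds a vnode lock cannot simply wait for the buf daemon (the
 * daemon cannot take that vnode lock to flush the vnode's dirty
 * buffers), so such a caller temporarily marks itself TDP_BUFNEED and
 * flushes that vnode's dirty buffers itself with buf_flush() before
 * rechecking for space.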
2050 */ 2051 static void 2052 getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo, 2053 int defrag) 2054 { 2055 struct thread *td; 2056 char *waitmsg; 2057 int fl, flags, norunbuf; 2058 2059 mtx_assert(&bqclean, MA_OWNED); 2060 2061 if (defrag) { 2062 flags = VFS_BIO_NEED_BUFSPACE; 2063 waitmsg = "nbufkv"; 2064 } else if (bufspace >= hibufspace) { 2065 waitmsg = "nbufbs"; 2066 flags = VFS_BIO_NEED_BUFSPACE; 2067 } else { 2068 waitmsg = "newbuf"; 2069 flags = VFS_BIO_NEED_ANY; 2070 } 2071 mtx_lock(&nblock); 2072 needsbuffer |= flags; 2073 mtx_unlock(&nblock); 2074 mtx_unlock(&bqclean); 2075 2076 bd_speedup(); /* heeeelp */ 2077 if ((gbflags & GB_NOWAIT_BD) != 0) 2078 return; 2079 2080 td = curthread; 2081 mtx_lock(&nblock); 2082 while (needsbuffer & flags) { 2083 if (vp != NULL && (td->td_pflags & TDP_BUFNEED) == 0) { 2084 mtx_unlock(&nblock); 2085 /* 2086 * getblk() is called with a vnode locked, and 2087 * some majority of the dirty buffers may as 2088 * well belong to the vnode. Flushing the 2089 * buffers there would make a progress that 2090 * cannot be achieved by the buf_daemon, that 2091 * cannot lock the vnode. 2092 */ 2093 norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) | 2094 (td->td_pflags & TDP_NORUNNINGBUF); 2095 /* play bufdaemon */ 2096 td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF; 2097 fl = buf_flush(vp, flushbufqtarget); 2098 td->td_pflags &= norunbuf; 2099 mtx_lock(&nblock); 2100 if (fl != 0) 2101 continue; 2102 if ((needsbuffer & flags) == 0) 2103 break; 2104 } 2105 if (msleep(&needsbuffer, &nblock, (PRIBIO + 4) | slpflag, 2106 waitmsg, slptimeo)) 2107 break; 2108 } 2109 mtx_unlock(&nblock); 2110 } 2111 2112 static void 2113 getnewbuf_reuse_bp(struct buf *bp, int qindex) 2114 { 2115 2116 CTR6(KTR_BUF, "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d " 2117 "queue %d (recycling)", bp, bp->b_vp, bp->b_flags, 2118 bp->b_kvasize, bp->b_bufsize, qindex); 2119 mtx_assert(&bqclean, MA_NOTOWNED); 2120 2121 /* 2122 * Note: we no longer distinguish between VMIO and non-VMIO 2123 * buffers. 2124 */ 2125 KASSERT((bp->b_flags & B_DELWRI) == 0, 2126 ("delwri buffer %p found in queue %d", bp, qindex)); 2127 2128 if (qindex == QUEUE_CLEAN) { 2129 if (bp->b_flags & B_VMIO) { 2130 bp->b_flags &= ~B_ASYNC; 2131 vfs_vmio_release(bp); 2132 } 2133 if (bp->b_vp != NULL) 2134 brelvp(bp); 2135 } 2136 2137 /* 2138 * Get the rest of the buffer freed up. b_kva* is still valid 2139 * after this operation. 2140 */ 2141 2142 if (bp->b_rcred != NOCRED) { 2143 crfree(bp->b_rcred); 2144 bp->b_rcred = NOCRED; 2145 } 2146 if (bp->b_wcred != NOCRED) { 2147 crfree(bp->b_wcred); 2148 bp->b_wcred = NOCRED; 2149 } 2150 if (!LIST_EMPTY(&bp->b_dep)) 2151 buf_deallocate(bp); 2152 if (bp->b_vflags & BV_BKGRDINPROG) 2153 panic("losing buffer 3"); 2154 KASSERT(bp->b_vp == NULL, ("bp: %p still has vnode %p. qindex: %d", 2155 bp, bp->b_vp, qindex)); 2156 KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0, 2157 ("bp: %p still on a buffer list. 
xflags %X", bp, bp->b_xflags)); 2158 2159 if (bp->b_bufsize) 2160 allocbuf(bp, 0); 2161 2162 bp->b_flags &= B_UNMAPPED | B_KVAALLOC; 2163 bp->b_ioflags = 0; 2164 bp->b_xflags = 0; 2165 KASSERT((bp->b_flags & B_INFREECNT) == 0, 2166 ("buf %p still counted as free?", bp)); 2167 bp->b_vflags = 0; 2168 bp->b_vp = NULL; 2169 bp->b_blkno = bp->b_lblkno = 0; 2170 bp->b_offset = NOOFFSET; 2171 bp->b_iodone = 0; 2172 bp->b_error = 0; 2173 bp->b_resid = 0; 2174 bp->b_bcount = 0; 2175 bp->b_npages = 0; 2176 bp->b_dirtyoff = bp->b_dirtyend = 0; 2177 bp->b_bufobj = NULL; 2178 bp->b_pin_count = 0; 2179 bp->b_fsprivate1 = NULL; 2180 bp->b_fsprivate2 = NULL; 2181 bp->b_fsprivate3 = NULL; 2182 2183 LIST_INIT(&bp->b_dep); 2184 } 2185 2186 static int flushingbufs; 2187 2188 static struct buf * 2189 getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata) 2190 { 2191 struct buf *bp, *nbp; 2192 int nqindex, qindex, pass; 2193 2194 KASSERT(!unmapped || !defrag, ("both unmapped and defrag")); 2195 2196 pass = 1; 2197 restart: 2198 atomic_add_int(&getnewbufrestarts, 1); 2199 2200 /* 2201 * Setup for scan. If we do not have enough free buffers, 2202 * we setup a degenerate case that immediately fails. Note 2203 * that if we are specially marked process, we are allowed to 2204 * dip into our reserves. 2205 * 2206 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN 2207 * for the allocation of the mapped buffer. For unmapped, the 2208 * easiest is to start with EMPTY outright. 2209 * 2210 * We start with EMPTYKVA. If the list is empty we backup to EMPTY. 2211 * However, there are a number of cases (defragging, reusing, ...) 2212 * where we cannot backup. 2213 */ 2214 nbp = NULL; 2215 mtx_lock(&bqclean); 2216 if (!defrag && unmapped) { 2217 nqindex = QUEUE_EMPTY; 2218 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 2219 } 2220 if (nbp == NULL) { 2221 nqindex = QUEUE_EMPTYKVA; 2222 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]); 2223 } 2224 2225 /* 2226 * If no EMPTYKVA buffers and we are either defragging or 2227 * reusing, locate a CLEAN buffer to free or reuse. If 2228 * bufspace useage is low skip this step so we can allocate a 2229 * new buffer. 2230 */ 2231 if (nbp == NULL && (defrag || bufspace >= lobufspace)) { 2232 nqindex = QUEUE_CLEAN; 2233 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2234 } 2235 2236 /* 2237 * If we could not find or were not allowed to reuse a CLEAN 2238 * buffer, check to see if it is ok to use an EMPTY buffer. 2239 * We can only use an EMPTY buffer if allocating its KVA would 2240 * not otherwise run us out of buffer space. No KVA is needed 2241 * for the unmapped allocation. 2242 */ 2243 if (nbp == NULL && defrag == 0 && (bufspace + maxsize < hibufspace || 2244 metadata)) { 2245 nqindex = QUEUE_EMPTY; 2246 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 2247 } 2248 2249 /* 2250 * All available buffers might be clean, retry ignoring the 2251 * lobufspace as the last resort. 2252 */ 2253 if (nbp == NULL && !TAILQ_EMPTY(&bufqueues[QUEUE_CLEAN])) { 2254 nqindex = QUEUE_CLEAN; 2255 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2256 } 2257 2258 /* 2259 * Run scan, possibly freeing data and/or kva mappings on the fly 2260 * depending. 2261 */ 2262 while ((bp = nbp) != NULL) { 2263 qindex = nqindex; 2264 2265 /* 2266 * Calculate next bp (we can only use it if we do not 2267 * block or do other fancy things). 
2268 */ 2269 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) { 2270 switch (qindex) { 2271 case QUEUE_EMPTY: 2272 nqindex = QUEUE_EMPTYKVA; 2273 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]); 2274 if (nbp != NULL) 2275 break; 2276 /* FALLTHROUGH */ 2277 case QUEUE_EMPTYKVA: 2278 nqindex = QUEUE_CLEAN; 2279 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 2280 if (nbp != NULL) 2281 break; 2282 /* FALLTHROUGH */ 2283 case QUEUE_CLEAN: 2284 if (metadata && pass == 1) { 2285 pass = 2; 2286 nqindex = QUEUE_EMPTY; 2287 nbp = TAILQ_FIRST( 2288 &bufqueues[QUEUE_EMPTY]); 2289 } 2290 /* 2291 * nbp is NULL. 2292 */ 2293 break; 2294 } 2295 } 2296 /* 2297 * If we are defragging then we need a buffer with 2298 * b_kvasize != 0. XXX this situation should no longer 2299 * occur, if defrag is non-zero the buffer's b_kvasize 2300 * should also be non-zero at this point. XXX 2301 */ 2302 if (defrag && bp->b_kvasize == 0) { 2303 printf("Warning: defrag empty buffer %p\n", bp); 2304 continue; 2305 } 2306 2307 /* 2308 * Start freeing the bp. This is somewhat involved. nbp 2309 * remains valid only for QUEUE_EMPTY[KVA] bp's. 2310 */ 2311 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 2312 continue; 2313 /* 2314 * BKGRDINPROG can only be set with the buf and bufobj 2315 * locks both held. We tolerate a race to clear it here. 2316 */ 2317 if (bp->b_vflags & BV_BKGRDINPROG) { 2318 BUF_UNLOCK(bp); 2319 continue; 2320 } 2321 2322 KASSERT(bp->b_qindex == qindex, 2323 ("getnewbuf: inconsistent queue %d bp %p", qindex, bp)); 2324 2325 bremfreel(bp); 2326 mtx_unlock(&bqclean); 2327 /* 2328 * NOTE: nbp is now entirely invalid. We can only restart 2329 * the scan from this point on. 2330 */ 2331 2332 getnewbuf_reuse_bp(bp, qindex); 2333 mtx_assert(&bqclean, MA_NOTOWNED); 2334 2335 /* 2336 * If we are defragging then free the buffer. 2337 */ 2338 if (defrag) { 2339 bp->b_flags |= B_INVAL; 2340 bfreekva(bp); 2341 brelse(bp); 2342 defrag = 0; 2343 goto restart; 2344 } 2345 2346 /* 2347 * Notify any waiters for the buffer lock about 2348 * identity change by freeing the buffer. 2349 */ 2350 if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) { 2351 bp->b_flags |= B_INVAL; 2352 bfreekva(bp); 2353 brelse(bp); 2354 goto restart; 2355 } 2356 2357 if (metadata) 2358 break; 2359 2360 /* 2361 * If we are overcomitted then recover the buffer and its 2362 * KVM space. This occurs in rare situations when multiple 2363 * processes are blocked in getnewbuf() or allocbuf(). 2364 */ 2365 if (bufspace >= hibufspace) 2366 flushingbufs = 1; 2367 if (flushingbufs && bp->b_kvasize != 0) { 2368 bp->b_flags |= B_INVAL; 2369 bfreekva(bp); 2370 brelse(bp); 2371 goto restart; 2372 } 2373 if (bufspace < lobufspace) 2374 flushingbufs = 0; 2375 break; 2376 } 2377 return (bp); 2378 } 2379 2380 /* 2381 * getnewbuf: 2382 * 2383 * Find and initialize a new buffer header, freeing up existing buffers 2384 * in the bufqueues as necessary. The new buffer is returned locked. 2385 * 2386 * Important: B_INVAL is not set. If the caller wishes to throw the 2387 * buffer away, the caller must set B_INVAL prior to calling brelse(). 
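 *
 * As an illustrative sketch of that rule (mirroring what this file
 * itself does when it gives up on a buffer):
 *
 *	bp->b_flags |= B_INVAL;
 *	brelse(bp);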
2388 * 2389 * We block if: 2390 * We have insufficient buffer headers 2391 * We have insufficient buffer space 2392 * buffer_map is too fragmented ( space reservation fails ) 2393 * If we have to flush dirty buffers ( but we try to avoid this ) 2394 */ 2395 static struct buf * 2396 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize, 2397 int gbflags) 2398 { 2399 struct buf *bp; 2400 int defrag, metadata; 2401 2402 KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC, 2403 ("GB_KVAALLOC only makes sense with GB_UNMAPPED")); 2404 if (!unmapped_buf_allowed) 2405 gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC); 2406 2407 defrag = 0; 2408 if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 || 2409 vp->v_type == VCHR) 2410 metadata = 1; 2411 else 2412 metadata = 0; 2413 /* 2414 * We can't afford to block since we might be holding a vnode lock, 2415 * which may prevent system daemons from running. We deal with 2416 * low-memory situations by proactively returning memory and running 2417 * async I/O rather then sync I/O. 2418 */ 2419 atomic_add_int(&getnewbufcalls, 1); 2420 atomic_subtract_int(&getnewbufrestarts, 1); 2421 restart: 2422 bp = getnewbuf_scan(maxsize, defrag, (gbflags & (GB_UNMAPPED | 2423 GB_KVAALLOC)) == GB_UNMAPPED, metadata); 2424 if (bp != NULL) 2425 defrag = 0; 2426 2427 /* 2428 * If we exhausted our list, sleep as appropriate. We may have to 2429 * wakeup various daemons and write out some dirty buffers. 2430 * 2431 * Generally we are sleeping due to insufficient buffer space. 2432 */ 2433 if (bp == NULL) { 2434 mtx_assert(&bqclean, MA_OWNED); 2435 getnewbuf_bufd_help(vp, gbflags, slpflag, slptimeo, defrag); 2436 mtx_assert(&bqclean, MA_NOTOWNED); 2437 } else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) { 2438 mtx_assert(&bqclean, MA_NOTOWNED); 2439 2440 bfreekva(bp); 2441 bp->b_flags |= B_UNMAPPED; 2442 bp->b_kvabase = bp->b_data = unmapped_buf; 2443 bp->b_kvasize = maxsize; 2444 atomic_add_long(&bufspace, bp->b_kvasize); 2445 atomic_add_long(&unmapped_bufspace, bp->b_kvasize); 2446 atomic_add_int(&bufreusecnt, 1); 2447 } else { 2448 mtx_assert(&bqclean, MA_NOTOWNED); 2449 2450 /* 2451 * We finally have a valid bp. We aren't quite out of the 2452 * woods, we still have to reserve kva space. In order 2453 * to keep fragmentation sane we only allocate kva in 2454 * BKVASIZE chunks. 2455 */ 2456 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK; 2457 2458 if (maxsize != bp->b_kvasize || (bp->b_flags & (B_UNMAPPED | 2459 B_KVAALLOC)) == B_UNMAPPED) { 2460 if (allocbufkva(bp, maxsize, gbflags)) { 2461 defrag = 1; 2462 bp->b_flags |= B_INVAL; 2463 brelse(bp); 2464 goto restart; 2465 } 2466 atomic_add_int(&bufreusecnt, 1); 2467 } else if ((bp->b_flags & B_KVAALLOC) != 0 && 2468 (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == 0) { 2469 /* 2470 * If the reused buffer has KVA allocated, 2471 * reassign b_kvaalloc to b_kvabase. 2472 */ 2473 bp->b_kvabase = bp->b_kvaalloc; 2474 bp->b_flags &= ~B_KVAALLOC; 2475 atomic_subtract_long(&unmapped_bufspace, 2476 bp->b_kvasize); 2477 atomic_add_int(&bufreusecnt, 1); 2478 } else if ((bp->b_flags & (B_UNMAPPED | B_KVAALLOC)) == 0 && 2479 (gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == (GB_UNMAPPED | 2480 GB_KVAALLOC)) { 2481 /* 2482 * The case of reused buffer already have KVA 2483 * mapped, but the request is for unmapped 2484 * buffer with KVA allocated. 
2485 */ 2486 bp->b_kvaalloc = bp->b_kvabase; 2487 bp->b_data = bp->b_kvabase = unmapped_buf; 2488 bp->b_flags |= B_UNMAPPED | B_KVAALLOC; 2489 atomic_add_long(&unmapped_bufspace, 2490 bp->b_kvasize); 2491 atomic_add_int(&bufreusecnt, 1); 2492 } 2493 if ((gbflags & GB_UNMAPPED) == 0) { 2494 bp->b_saveaddr = bp->b_kvabase; 2495 bp->b_data = bp->b_saveaddr; 2496 bp->b_flags &= ~B_UNMAPPED; 2497 BUF_CHECK_MAPPED(bp); 2498 } 2499 } 2500 return (bp); 2501 } 2502 2503 /* 2504 * buf_daemon: 2505 * 2506 * buffer flushing daemon. Buffers are normally flushed by the 2507 * update daemon but if it cannot keep up this process starts to 2508 * take the load in an attempt to prevent getnewbuf() from blocking. 2509 */ 2510 2511 static struct kproc_desc buf_kp = { 2512 "bufdaemon", 2513 buf_daemon, 2514 &bufdaemonproc 2515 }; 2516 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp); 2517 2518 static int 2519 buf_flush(struct vnode *vp, int target) 2520 { 2521 int flushed; 2522 2523 flushed = flushbufqueues(vp, target, 0); 2524 if (flushed == 0) { 2525 /* 2526 * Could not find any buffers without rollback 2527 * dependencies, so just write the first one 2528 * in the hopes of eventually making progress. 2529 */ 2530 if (vp != NULL && target > 2) 2531 target /= 2; 2532 flushbufqueues(vp, target, 1); 2533 } 2534 return (flushed); 2535 } 2536 2537 static void 2538 buf_daemon() 2539 { 2540 int lodirty; 2541 2542 /* 2543 * This process needs to be suspended prior to shutdown sync. 2544 */ 2545 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc, 2546 SHUTDOWN_PRI_LAST); 2547 2548 /* 2549 * This process is allowed to take the buffer cache to the limit 2550 */ 2551 curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED; 2552 mtx_lock(&bdlock); 2553 for (;;) { 2554 bd_request = 0; 2555 mtx_unlock(&bdlock); 2556 2557 kproc_suspend_check(bufdaemonproc); 2558 lodirty = lodirtybuffers; 2559 if (bd_speedupreq) { 2560 lodirty = numdirtybuffers / 2; 2561 bd_speedupreq = 0; 2562 } 2563 /* 2564 * Do the flush. Limit the amount of in-transit I/O we 2565 * allow to build up, otherwise we would completely saturate 2566 * the I/O system. 2567 */ 2568 while (numdirtybuffers > lodirty) { 2569 if (buf_flush(NULL, numdirtybuffers - lodirty) == 0) 2570 break; 2571 kern_yield(PRI_USER); 2572 } 2573 2574 /* 2575 * Only clear bd_request if we have reached our low water 2576 * mark. The buf_daemon normally waits 1 second and 2577 * then incrementally flushes any dirty buffers that have 2578 * built up, within reason. 2579 * 2580 * If we were unable to hit our low water mark and couldn't 2581 * find any flushable buffers, we sleep for a short period 2582 * to avoid endless loops on unlockable buffers. 2583 */ 2584 mtx_lock(&bdlock); 2585 if (numdirtybuffers <= lodirtybuffers) { 2586 /* 2587 * We reached our low water mark, reset the 2588 * request and sleep until we are needed again. 2589 * The sleep is just so the suspend code works. 2590 */ 2591 bd_request = 0; 2592 /* 2593 * Do an extra wakeup in case dirty threshold 2594 * changed via sysctl and the explicit transition 2595 * out of shortfall was missed. 2596 */ 2597 bdirtywakeup(); 2598 if (runningbufspace <= lorunningspace) 2599 runningwakeup(); 2600 msleep(&bd_request, &bdlock, PVM, "psleep", hz); 2601 } else { 2602 /* 2603 * We couldn't find any flushable dirty buffers but 2604 * still have too many dirty buffers, we 2605 * have to sleep and try again. 
(rare) 2606 */ 2607 msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10); 2608 } 2609 } 2610 } 2611 2612 /* 2613 * flushbufqueues: 2614 * 2615 * Try to flush a buffer in the dirty queue. We must be careful to 2616 * free up B_INVAL buffers instead of write them, which NFS is 2617 * particularly sensitive to. 2618 */ 2619 static int flushwithdeps = 0; 2620 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps, 2621 0, "Number of buffers flushed with dependecies that require rollbacks"); 2622 2623 static int 2624 flushbufqueues(struct vnode *lvp, int target, int flushdeps) 2625 { 2626 struct buf *sentinel; 2627 struct vnode *vp; 2628 struct mount *mp; 2629 struct buf *bp; 2630 int hasdeps; 2631 int flushed; 2632 int queue; 2633 2634 flushed = 0; 2635 queue = QUEUE_DIRTY; 2636 bp = NULL; 2637 sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO); 2638 sentinel->b_qindex = QUEUE_SENTINEL; 2639 mtx_lock(&bqdirty); 2640 TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist); 2641 while (flushed != target) { 2642 bp = TAILQ_NEXT(sentinel, b_freelist); 2643 if (bp != NULL) { 2644 TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist); 2645 TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel, 2646 b_freelist); 2647 } else 2648 break; 2649 /* 2650 * Skip sentinels inserted by other invocations of the 2651 * flushbufqueues(), taking care to not reorder them. 2652 */ 2653 if (bp->b_qindex == QUEUE_SENTINEL) 2654 continue; 2655 /* 2656 * Only flush the buffers that belong to the 2657 * vnode locked by the curthread. 2658 */ 2659 if (lvp != NULL && bp->b_vp != lvp) 2660 continue; 2661 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) 2662 continue; 2663 if (bp->b_pin_count > 0) { 2664 BUF_UNLOCK(bp); 2665 continue; 2666 } 2667 /* 2668 * BKGRDINPROG can only be set with the buf and bufobj 2669 * locks both held. We tolerate a race to clear it here. 2670 */ 2671 if ((bp->b_vflags & BV_BKGRDINPROG) != 0 || 2672 (bp->b_flags & B_DELWRI) == 0) { 2673 BUF_UNLOCK(bp); 2674 continue; 2675 } 2676 if (bp->b_flags & B_INVAL) { 2677 bremfreel(bp); 2678 mtx_unlock(&bqdirty); 2679 brelse(bp); 2680 flushed++; 2681 mtx_lock(&bqdirty); 2682 continue; 2683 } 2684 2685 if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) { 2686 if (flushdeps == 0) { 2687 BUF_UNLOCK(bp); 2688 continue; 2689 } 2690 hasdeps = 1; 2691 } else 2692 hasdeps = 0; 2693 /* 2694 * We must hold the lock on a vnode before writing 2695 * one of its buffers. Otherwise we may confuse, or 2696 * in the case of a snapshot vnode, deadlock the 2697 * system. 2698 * 2699 * The lock order here is the reverse of the normal 2700 * of vnode followed by buf lock. This is ok because 2701 * the NOWAIT will prevent deadlock. 2702 */ 2703 vp = bp->b_vp; 2704 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2705 BUF_UNLOCK(bp); 2706 continue; 2707 } 2708 if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_CANRECURSE) == 0) { 2709 mtx_unlock(&bqdirty); 2710 CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X", 2711 bp, bp->b_vp, bp->b_flags); 2712 if (curproc == bufdaemonproc) 2713 vfs_bio_awrite(bp); 2714 else { 2715 bremfree(bp); 2716 bwrite(bp); 2717 notbufdflushes++; 2718 } 2719 vn_finished_write(mp); 2720 VOP_UNLOCK(vp, 0); 2721 flushwithdeps += hasdeps; 2722 flushed++; 2723 2724 /* 2725 * Sleeping on runningbufspace while holding 2726 * vnode lock leads to deadlock. 
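 *
 * Only the buf daemon, which enters this path with lvp == NULL and
 * thus without a locked vnode of its own, throttles on runningbufspace
 * here; a helping thread that still holds its vnode locked skips the
 * wait.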
2727 */ 2728 if (curproc == bufdaemonproc && 2729 runningbufspace > hirunningspace) 2730 waitrunningbufspace(); 2731 mtx_lock(&bqdirty); 2732 continue; 2733 } 2734 vn_finished_write(mp); 2735 BUF_UNLOCK(bp); 2736 } 2737 TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist); 2738 mtx_unlock(&bqdirty); 2739 free(sentinel, M_TEMP); 2740 return (flushed); 2741 } 2742 2743 /* 2744 * Check to see if a block is currently memory resident. 2745 */ 2746 struct buf * 2747 incore(struct bufobj *bo, daddr_t blkno) 2748 { 2749 struct buf *bp; 2750 2751 BO_RLOCK(bo); 2752 bp = gbincore(bo, blkno); 2753 BO_RUNLOCK(bo); 2754 return (bp); 2755 } 2756 2757 /* 2758 * Returns true if no I/O is needed to access the 2759 * associated VM object. This is like incore except 2760 * it also hunts around in the VM system for the data. 2761 */ 2762 2763 static int 2764 inmem(struct vnode * vp, daddr_t blkno) 2765 { 2766 vm_object_t obj; 2767 vm_offset_t toff, tinc, size; 2768 vm_page_t m; 2769 vm_ooffset_t off; 2770 2771 ASSERT_VOP_LOCKED(vp, "inmem"); 2772 2773 if (incore(&vp->v_bufobj, blkno)) 2774 return 1; 2775 if (vp->v_mount == NULL) 2776 return 0; 2777 obj = vp->v_object; 2778 if (obj == NULL) 2779 return (0); 2780 2781 size = PAGE_SIZE; 2782 if (size > vp->v_mount->mnt_stat.f_iosize) 2783 size = vp->v_mount->mnt_stat.f_iosize; 2784 off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize; 2785 2786 VM_OBJECT_RLOCK(obj); 2787 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 2788 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 2789 if (!m) 2790 goto notinmem; 2791 tinc = size; 2792 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK)) 2793 tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK); 2794 if (vm_page_is_valid(m, 2795 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0) 2796 goto notinmem; 2797 } 2798 VM_OBJECT_RUNLOCK(obj); 2799 return 1; 2800 2801 notinmem: 2802 VM_OBJECT_RUNLOCK(obj); 2803 return (0); 2804 } 2805 2806 /* 2807 * Set the dirty range for a buffer based on the status of the dirty 2808 * bits in the pages comprising the buffer. The range is limited 2809 * to the size of the buffer. 2810 * 2811 * Tell the VM system that the pages associated with this buffer 2812 * are clean. This is used for delayed writes where the data is 2813 * going to go to disk eventually without additional VM intevention. 2814 * 2815 * Note that while we only really need to clean through to b_bcount, we 2816 * just go ahead and clean through to b_bufsize. 
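 *
 * The work is done under the object lock: busy pages are drained
 * first, the buffer's dirty range is recomputed from the pages
 * (vfs_setdirty_locked_object()), and each page is then marked valid
 * and clean for the span it backs.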
2817 */ 2818 static void 2819 vfs_clean_pages_dirty_buf(struct buf *bp) 2820 { 2821 vm_ooffset_t foff, noff, eoff; 2822 vm_page_t m; 2823 int i; 2824 2825 if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0) 2826 return; 2827 2828 foff = bp->b_offset; 2829 KASSERT(bp->b_offset != NOOFFSET, 2830 ("vfs_clean_pages_dirty_buf: no buffer offset")); 2831 2832 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 2833 vfs_drain_busy_pages(bp); 2834 vfs_setdirty_locked_object(bp); 2835 for (i = 0; i < bp->b_npages; i++) { 2836 noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 2837 eoff = noff; 2838 if (eoff > bp->b_offset + bp->b_bufsize) 2839 eoff = bp->b_offset + bp->b_bufsize; 2840 m = bp->b_pages[i]; 2841 vfs_page_set_validclean(bp, foff, m); 2842 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */ 2843 foff = noff; 2844 } 2845 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 2846 } 2847 2848 static void 2849 vfs_setdirty_locked_object(struct buf *bp) 2850 { 2851 vm_object_t object; 2852 int i; 2853 2854 object = bp->b_bufobj->bo_object; 2855 VM_OBJECT_ASSERT_WLOCKED(object); 2856 2857 /* 2858 * We qualify the scan for modified pages on whether the 2859 * object has been flushed yet. 2860 */ 2861 if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) { 2862 vm_offset_t boffset; 2863 vm_offset_t eoffset; 2864 2865 /* 2866 * test the pages to see if they have been modified directly 2867 * by users through the VM system. 2868 */ 2869 for (i = 0; i < bp->b_npages; i++) 2870 vm_page_test_dirty(bp->b_pages[i]); 2871 2872 /* 2873 * Calculate the encompassing dirty range, boffset and eoffset, 2874 * (eoffset - boffset) bytes. 2875 */ 2876 2877 for (i = 0; i < bp->b_npages; i++) { 2878 if (bp->b_pages[i]->dirty) 2879 break; 2880 } 2881 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2882 2883 for (i = bp->b_npages - 1; i >= 0; --i) { 2884 if (bp->b_pages[i]->dirty) { 2885 break; 2886 } 2887 } 2888 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2889 2890 /* 2891 * Fit it to the buffer. 2892 */ 2893 2894 if (eoffset > bp->b_bcount) 2895 eoffset = bp->b_bcount; 2896 2897 /* 2898 * If we have a good dirty range, merge with the existing 2899 * dirty range. 2900 */ 2901 2902 if (boffset < eoffset) { 2903 if (bp->b_dirtyoff > boffset) 2904 bp->b_dirtyoff = boffset; 2905 if (bp->b_dirtyend < eoffset) 2906 bp->b_dirtyend = eoffset; 2907 } 2908 } 2909 } 2910 2911 /* 2912 * Allocate the KVA mapping for an existing buffer. It handles the 2913 * cases of both B_UNMAPPED buffer, and buffer with the preallocated 2914 * KVA which is not mapped (B_KVAALLOC). 2915 */ 2916 static void 2917 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags) 2918 { 2919 struct buf *scratch_bp; 2920 int bsize, maxsize, need_mapping, need_kva; 2921 off_t offset; 2922 2923 need_mapping = (bp->b_flags & B_UNMAPPED) != 0 && 2924 (gbflags & GB_UNMAPPED) == 0; 2925 need_kva = (bp->b_flags & (B_KVAALLOC | B_UNMAPPED)) == B_UNMAPPED && 2926 (gbflags & GB_KVAALLOC) != 0; 2927 if (!need_mapping && !need_kva) 2928 return; 2929 2930 BUF_CHECK_UNMAPPED(bp); 2931 2932 if (need_mapping && (bp->b_flags & B_KVAALLOC) != 0) { 2933 /* 2934 * Buffer is not mapped, but the KVA was already 2935 * reserved at the time of the instantiation. Use the 2936 * allocated space. 
2937 */ 2938 bp->b_flags &= ~B_KVAALLOC; 2939 KASSERT(bp->b_kvaalloc != 0, ("kvaalloc == 0")); 2940 bp->b_kvabase = bp->b_kvaalloc; 2941 atomic_subtract_long(&unmapped_bufspace, bp->b_kvasize); 2942 goto has_addr; 2943 } 2944 2945 /* 2946 * Calculate the amount of the address space we would reserve 2947 * if the buffer was mapped. 2948 */ 2949 bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize; 2950 offset = blkno * bsize; 2951 maxsize = size + (offset & PAGE_MASK); 2952 maxsize = imax(maxsize, bsize); 2953 2954 mapping_loop: 2955 if (allocbufkva(bp, maxsize, gbflags)) { 2956 /* 2957 * Request defragmentation. getnewbuf() returns us the 2958 * allocated space by the scratch buffer KVA. 2959 */ 2960 scratch_bp = getnewbuf(bp->b_vp, 0, 0, size, maxsize, gbflags | 2961 (GB_UNMAPPED | GB_KVAALLOC)); 2962 if (scratch_bp == NULL) { 2963 if ((gbflags & GB_NOWAIT_BD) != 0) { 2964 /* 2965 * XXXKIB: defragmentation cannot 2966 * succeed, not sure what else to do. 2967 */ 2968 panic("GB_NOWAIT_BD and B_UNMAPPED %p", bp); 2969 } 2970 atomic_add_int(&mappingrestarts, 1); 2971 goto mapping_loop; 2972 } 2973 KASSERT((scratch_bp->b_flags & B_KVAALLOC) != 0, 2974 ("scratch bp !B_KVAALLOC %p", scratch_bp)); 2975 setbufkva(bp, (vm_offset_t)scratch_bp->b_kvaalloc, 2976 scratch_bp->b_kvasize, gbflags); 2977 2978 /* Get rid of the scratch buffer. */ 2979 scratch_bp->b_kvasize = 0; 2980 scratch_bp->b_flags |= B_INVAL; 2981 scratch_bp->b_flags &= ~(B_UNMAPPED | B_KVAALLOC); 2982 brelse(scratch_bp); 2983 } 2984 if (!need_mapping) 2985 return; 2986 2987 has_addr: 2988 bp->b_saveaddr = bp->b_kvabase; 2989 bp->b_data = bp->b_saveaddr; /* b_offset is handled by bpmap_qenter */ 2990 bp->b_flags &= ~B_UNMAPPED; 2991 BUF_CHECK_MAPPED(bp); 2992 bpmap_qenter(bp); 2993 } 2994 2995 /* 2996 * getblk: 2997 * 2998 * Get a block given a specified block and offset into a file/device. 2999 * The buffer's B_DONE bit will be cleared on return, making it almost 3000 * ready for an I/O initiation. B_INVAL may or may not be set on 3001 * return. The caller should clear B_INVAL prior to initiating a 3002 * READ. 3003 * 3004 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for 3005 * an existing buffer. 3006 * 3007 * For a VMIO buffer, B_CACHE is modified according to the backing VM. 3008 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set 3009 * and then cleared based on the backing VM. If the previous buffer is 3010 * non-0-sized but invalid, B_CACHE will be cleared. 3011 * 3012 * If getblk() must create a new buffer, the new buffer is returned with 3013 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which 3014 * case it is returned with B_INVAL clear and B_CACHE set based on the 3015 * backing VM. 3016 * 3017 * getblk() also forces a bwrite() for any B_DELWRI buffer whose 3018 * B_CACHE bit is clear. 3019 * 3020 * What this means, basically, is that the caller should use B_CACHE to 3021 * determine whether the buffer is fully valid or not and should clear 3022 * B_INVAL prior to issuing a read. If the caller intends to validate 3023 * the buffer by loading its data area with something, the caller needs 3024 * to clear B_INVAL. If the caller does this without issuing an I/O, 3025 * the caller should set B_CACHE ( as an optimization ), else the caller 3026 * should issue the I/O and biodone() will set B_CACHE if the I/O was 3027 * a write attempt or if it was a successful read.
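 *
 * As a rough sketch (essentially the pattern bread() follows; error
 * and credential handling are omitted here), a read through the
 * buffer cache looks like:
 *
 *	bp = getblk(vp, blkno, size, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		vfs_busy_pages(bp, 0);
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *	}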
If the caller 3028 * intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR 3029 * prior to issuing the READ. biodone() will *not* clear B_INVAL. 3030 */ 3031 struct buf * 3032 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo, 3033 int flags) 3034 { 3035 struct buf *bp; 3036 struct bufobj *bo; 3037 int bsize, error, maxsize, vmio; 3038 off_t offset; 3039 3040 CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size); 3041 KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC, 3042 ("GB_KVAALLOC only makes sense with GB_UNMAPPED")); 3043 ASSERT_VOP_LOCKED(vp, "getblk"); 3044 if (size > MAXBSIZE) 3045 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 3046 if (!unmapped_buf_allowed) 3047 flags &= ~(GB_UNMAPPED | GB_KVAALLOC); 3048 3049 bo = &vp->v_bufobj; 3050 loop: 3051 BO_RLOCK(bo); 3052 bp = gbincore(bo, blkno); 3053 if (bp != NULL) { 3054 int lockflags; 3055 /* 3056 * Buffer is in-core. If the buffer is not busy nor managed, 3057 * it must be on a queue. 3058 */ 3059 lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK; 3060 3061 if (flags & GB_LOCK_NOWAIT) 3062 lockflags |= LK_NOWAIT; 3063 3064 error = BUF_TIMELOCK(bp, lockflags, 3065 BO_LOCKPTR(bo), "getblk", slpflag, slptimeo); 3066 3067 /* 3068 * If we slept and got the lock we have to restart in case 3069 * the buffer changed identities. 3070 */ 3071 if (error == ENOLCK) 3072 goto loop; 3073 /* We timed out or were interrupted. */ 3074 else if (error) 3075 return (NULL); 3076 /* If recursed, assume caller knows the rules. */ 3077 else if (BUF_LOCKRECURSED(bp)) 3078 goto end; 3079 3080 /* 3081 * The buffer is locked. B_CACHE is cleared if the buffer is 3082 * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set 3083 * and for a VMIO buffer B_CACHE is adjusted according to the 3084 * backing VM cache. 3085 */ 3086 if (bp->b_flags & B_INVAL) 3087 bp->b_flags &= ~B_CACHE; 3088 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0) 3089 bp->b_flags |= B_CACHE; 3090 if (bp->b_flags & B_MANAGED) 3091 MPASS(bp->b_qindex == QUEUE_NONE); 3092 else 3093 bremfree(bp); 3094 3095 /* 3096 * check for size inconsistencies for non-VMIO case. 3097 */ 3098 if (bp->b_bcount != size) { 3099 if ((bp->b_flags & B_VMIO) == 0 || 3100 (size > bp->b_kvasize)) { 3101 if (bp->b_flags & B_DELWRI) { 3102 /* 3103 * If buffer is pinned and caller does 3104 * not want sleep waiting for it to be 3105 * unpinned, bail out 3106 * */ 3107 if (bp->b_pin_count > 0) { 3108 if (flags & GB_LOCK_NOWAIT) { 3109 bqrelse(bp); 3110 return (NULL); 3111 } else { 3112 bunpin_wait(bp); 3113 } 3114 } 3115 bp->b_flags |= B_NOCACHE; 3116 bwrite(bp); 3117 } else { 3118 if (LIST_EMPTY(&bp->b_dep)) { 3119 bp->b_flags |= B_RELBUF; 3120 brelse(bp); 3121 } else { 3122 bp->b_flags |= B_NOCACHE; 3123 bwrite(bp); 3124 } 3125 } 3126 goto loop; 3127 } 3128 } 3129 3130 /* 3131 * Handle the case of unmapped buffer which should 3132 * become mapped, or the buffer for which KVA 3133 * reservation is requested. 3134 */ 3135 bp_unmapped_get_kva(bp, blkno, size, flags); 3136 3137 /* 3138 * If the size is inconsistant in the VMIO case, we can resize 3139 * the buffer. This might lead to B_CACHE getting set or 3140 * cleared. If the size has not changed, B_CACHE remains 3141 * unchanged from its previous state. 
3142 */ 3143 if (bp->b_bcount != size) 3144 allocbuf(bp, size); 3145 3146 KASSERT(bp->b_offset != NOOFFSET, 3147 ("getblk: no buffer offset")); 3148 3149 /* 3150 * A buffer with B_DELWRI set and B_CACHE clear must 3151 * be committed before we can return the buffer in 3152 * order to prevent the caller from issuing a read 3153 * ( due to B_CACHE not being set ) and overwriting 3154 * it. 3155 * 3156 * Most callers, including NFS and FFS, need this to 3157 * operate properly either because they assume they 3158 * can issue a read if B_CACHE is not set, or because 3159 * ( for example ) an uncached B_DELWRI might loop due 3160 * to softupdates re-dirtying the buffer. In the latter 3161 * case, B_CACHE is set after the first write completes, 3162 * preventing further loops. 3163 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE 3164 * above while extending the buffer, we cannot allow the 3165 * buffer to remain with B_CACHE set after the write 3166 * completes or it will represent a corrupt state. To 3167 * deal with this we set B_NOCACHE to scrap the buffer 3168 * after the write. 3169 * 3170 * We might be able to do something fancy, like setting 3171 * B_CACHE in bwrite() except if B_DELWRI is already set, 3172 * so the below call doesn't set B_CACHE, but that gets real 3173 * confusing. This is much easier. 3174 */ 3175 3176 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 3177 bp->b_flags |= B_NOCACHE; 3178 bwrite(bp); 3179 goto loop; 3180 } 3181 bp->b_flags &= ~B_DONE; 3182 } else { 3183 /* 3184 * Buffer is not in-core, create new buffer. The buffer 3185 * returned by getnewbuf() is locked. Note that the returned 3186 * buffer is also considered valid (not marked B_INVAL). 3187 */ 3188 BO_RUNLOCK(bo); 3189 /* 3190 * If the user does not want us to create the buffer, bail out 3191 * here. 3192 */ 3193 if (flags & GB_NOCREAT) 3194 return NULL; 3195 if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread)) 3196 return NULL; 3197 3198 bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize; 3199 offset = blkno * bsize; 3200 vmio = vp->v_object != NULL; 3201 if (vmio) { 3202 maxsize = size + (offset & PAGE_MASK); 3203 } else { 3204 maxsize = size; 3205 /* Do not allow non-VMIO notmapped buffers. */ 3206 flags &= ~GB_UNMAPPED; 3207 } 3208 maxsize = imax(maxsize, bsize); 3209 3210 bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags); 3211 if (bp == NULL) { 3212 if (slpflag || slptimeo) 3213 return NULL; 3214 goto loop; 3215 } 3216 3217 /* 3218 * This code is used to make sure that a buffer is not 3219 * created while the getnewbuf routine is blocked. 3220 * This can be a problem whether the vnode is locked or not. 3221 * If the buffer is created out from under us, we have to 3222 * throw away the one we just created. 3223 * 3224 * Note: this must occur before we associate the buffer 3225 * with the vp especially considering limitations in 3226 * the splay tree implementation when dealing with duplicate 3227 * lblkno's. 3228 */ 3229 BO_LOCK(bo); 3230 if (gbincore(bo, blkno)) { 3231 BO_UNLOCK(bo); 3232 bp->b_flags |= B_INVAL; 3233 brelse(bp); 3234 goto loop; 3235 } 3236 3237 /* 3238 * Insert the buffer into the hash, so that it can 3239 * be found by incore. 3240 */ 3241 bp->b_blkno = bp->b_lblkno = blkno; 3242 bp->b_offset = offset; 3243 bgetvp(vp, bp); 3244 BO_UNLOCK(bo); 3245 3246 /* 3247 * set B_VMIO bit. allocbuf() the buffer bigger. 
Since the 3248 * buffer size starts out as 0, B_CACHE will be set by 3249 * allocbuf() for the VMIO case prior to it testing the 3250 * backing store for validity. 3251 */ 3252 3253 if (vmio) { 3254 bp->b_flags |= B_VMIO; 3255 KASSERT(vp->v_object == bp->b_bufobj->bo_object, 3256 ("ARGH! different b_bufobj->bo_object %p %p %p\n", 3257 bp, vp->v_object, bp->b_bufobj->bo_object)); 3258 } else { 3259 bp->b_flags &= ~B_VMIO; 3260 KASSERT(bp->b_bufobj->bo_object == NULL, 3261 ("ARGH! has b_bufobj->bo_object %p %p\n", 3262 bp, bp->b_bufobj->bo_object)); 3263 BUF_CHECK_MAPPED(bp); 3264 } 3265 3266 allocbuf(bp, size); 3267 bp->b_flags &= ~B_DONE; 3268 } 3269 CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp); 3270 BUF_ASSERT_HELD(bp); 3271 end: 3272 KASSERT(bp->b_bufobj == bo, 3273 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3274 return (bp); 3275 } 3276 3277 /* 3278 * Get an empty, disassociated buffer of given size. The buffer is initially 3279 * set to B_INVAL. 3280 */ 3281 struct buf * 3282 geteblk(int size, int flags) 3283 { 3284 struct buf *bp; 3285 int maxsize; 3286 3287 maxsize = (size + BKVAMASK) & ~BKVAMASK; 3288 while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) { 3289 if ((flags & GB_NOWAIT_BD) && 3290 (curthread->td_pflags & TDP_BUFNEED) != 0) 3291 return (NULL); 3292 } 3293 allocbuf(bp, size); 3294 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 3295 BUF_ASSERT_HELD(bp); 3296 return (bp); 3297 } 3298 3299 3300 /* 3301 * This code constitutes the buffer memory from either anonymous system 3302 * memory (in the case of non-VMIO operations) or from an associated 3303 * VM object (in the case of VMIO operations). This code is able to 3304 * resize a buffer up or down. 3305 * 3306 * Note that this code is tricky, and has many complications to resolve 3307 * deadlock or inconsistant data situations. Tread lightly!!! 3308 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 3309 * the caller. Calling this code willy nilly can result in the loss of data. 3310 * 3311 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 3312 * B_CACHE for the non-VMIO case. 3313 */ 3314 3315 int 3316 allocbuf(struct buf *bp, int size) 3317 { 3318 int newbsize, mbsize; 3319 int i; 3320 3321 BUF_ASSERT_HELD(bp); 3322 3323 if (bp->b_kvasize < size) 3324 panic("allocbuf: buffer too small"); 3325 3326 if ((bp->b_flags & B_VMIO) == 0) { 3327 caddr_t origbuf; 3328 int origbufsize; 3329 /* 3330 * Just get anonymous memory from the kernel. Don't 3331 * mess with B_CACHE. 3332 */ 3333 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 3334 if (bp->b_flags & B_MALLOC) 3335 newbsize = mbsize; 3336 else 3337 newbsize = round_page(size); 3338 3339 if (newbsize < bp->b_bufsize) { 3340 /* 3341 * malloced buffers are not shrunk 3342 */ 3343 if (bp->b_flags & B_MALLOC) { 3344 if (newbsize) { 3345 bp->b_bcount = size; 3346 } else { 3347 free(bp->b_data, M_BIOBUF); 3348 if (bp->b_bufsize) { 3349 atomic_subtract_long( 3350 &bufmallocspace, 3351 bp->b_bufsize); 3352 bufspacewakeup(); 3353 bp->b_bufsize = 0; 3354 } 3355 bp->b_saveaddr = bp->b_kvabase; 3356 bp->b_data = bp->b_saveaddr; 3357 bp->b_bcount = 0; 3358 bp->b_flags &= ~B_MALLOC; 3359 } 3360 return 1; 3361 } 3362 vm_hold_free_pages(bp, newbsize); 3363 } else if (newbsize > bp->b_bufsize) { 3364 /* 3365 * We only use malloced memory on the first allocation. 3366 * and revert to page-allocated memory when the buffer 3367 * grows. 
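 *
 * Concretely: a buffer is backed by malloc(9) only when it has no
 * backing yet (b_bufsize == 0), the request fits in half a page, and
 * the global bufmallocspace budget is not exhausted; any other
 * non-VMIO growth is backed by wired pages via vm_hold_load_pages().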
3368 */ 3369 /* 3370 * There is a potential smp race here that could lead 3371 * to bufmallocspace slightly passing the max. It 3372 * is probably extremely rare and not worth worrying 3373 * over. 3374 */ 3375 if ( (bufmallocspace < maxbufmallocspace) && 3376 (bp->b_bufsize == 0) && 3377 (mbsize <= PAGE_SIZE/2)) { 3378 3379 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 3380 bp->b_bufsize = mbsize; 3381 bp->b_bcount = size; 3382 bp->b_flags |= B_MALLOC; 3383 atomic_add_long(&bufmallocspace, mbsize); 3384 return 1; 3385 } 3386 origbuf = NULL; 3387 origbufsize = 0; 3388 /* 3389 * If the buffer is growing on its other-than-first allocation, 3390 * then we revert to the page-allocation scheme. 3391 */ 3392 if (bp->b_flags & B_MALLOC) { 3393 origbuf = bp->b_data; 3394 origbufsize = bp->b_bufsize; 3395 bp->b_data = bp->b_kvabase; 3396 if (bp->b_bufsize) { 3397 atomic_subtract_long(&bufmallocspace, 3398 bp->b_bufsize); 3399 bufspacewakeup(); 3400 bp->b_bufsize = 0; 3401 } 3402 bp->b_flags &= ~B_MALLOC; 3403 newbsize = round_page(newbsize); 3404 } 3405 vm_hold_load_pages( 3406 bp, 3407 (vm_offset_t) bp->b_data + bp->b_bufsize, 3408 (vm_offset_t) bp->b_data + newbsize); 3409 if (origbuf) { 3410 bcopy(origbuf, bp->b_data, origbufsize); 3411 free(origbuf, M_BIOBUF); 3412 } 3413 } 3414 } else { 3415 int desiredpages; 3416 3417 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 3418 desiredpages = (size == 0) ? 0 : 3419 num_pages((bp->b_offset & PAGE_MASK) + newbsize); 3420 3421 if (bp->b_flags & B_MALLOC) 3422 panic("allocbuf: VMIO buffer can't be malloced"); 3423 /* 3424 * Set B_CACHE initially if buffer is 0 length or will become 3425 * 0-length. 3426 */ 3427 if (size == 0 || bp->b_bufsize == 0) 3428 bp->b_flags |= B_CACHE; 3429 3430 if (newbsize < bp->b_bufsize) { 3431 /* 3432 * DEV_BSIZE aligned new buffer size is less then the 3433 * DEV_BSIZE aligned existing buffer size. Figure out 3434 * if we have to remove any pages. 3435 */ 3436 if (desiredpages < bp->b_npages) { 3437 vm_page_t m; 3438 3439 if ((bp->b_flags & B_UNMAPPED) == 0) { 3440 BUF_CHECK_MAPPED(bp); 3441 pmap_qremove((vm_offset_t)trunc_page( 3442 (vm_offset_t)bp->b_data) + 3443 (desiredpages << PAGE_SHIFT), 3444 (bp->b_npages - desiredpages)); 3445 } else 3446 BUF_CHECK_UNMAPPED(bp); 3447 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 3448 for (i = desiredpages; i < bp->b_npages; i++) { 3449 /* 3450 * the page is not freed here -- it 3451 * is the responsibility of 3452 * vnode_pager_setsize 3453 */ 3454 m = bp->b_pages[i]; 3455 KASSERT(m != bogus_page, 3456 ("allocbuf: bogus page found")); 3457 while (vm_page_sleep_if_busy(m, TRUE, 3458 "biodep")) 3459 continue; 3460 3461 bp->b_pages[i] = NULL; 3462 vm_page_lock(m); 3463 vm_page_unwire(m, 0); 3464 vm_page_unlock(m); 3465 } 3466 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 3467 bp->b_npages = desiredpages; 3468 } 3469 } else if (size > bp->b_bcount) { 3470 /* 3471 * We are growing the buffer, possibly in a 3472 * byte-granular fashion. 3473 */ 3474 vm_object_t obj; 3475 vm_offset_t toff; 3476 vm_offset_t tinc; 3477 3478 /* 3479 * Step 1, bring in the VM pages from the object, 3480 * allocating them if necessary. We must clear 3481 * B_CACHE if these pages are not valid for the 3482 * range covered by the buffer. 
3483 */ 3484 3485 obj = bp->b_bufobj->bo_object; 3486 3487 VM_OBJECT_WLOCK(obj); 3488 while (bp->b_npages < desiredpages) { 3489 vm_page_t m; 3490 3491 /* 3492 * We must allocate system pages since blocking 3493 * here could interfere with paging I/O, no 3494 * matter which process we are. 3495 * 3496 * We can only test VPO_BUSY here. Blocking on 3497 * m->busy might lead to a deadlock: 3498 * vm_fault->getpages->cluster_read->allocbuf 3499 * Thus, we specify VM_ALLOC_IGN_SBUSY. 3500 */ 3501 m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + 3502 bp->b_npages, VM_ALLOC_NOBUSY | 3503 VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | 3504 VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY | 3505 VM_ALLOC_COUNT(desiredpages - bp->b_npages)); 3506 if (m->valid == 0) 3507 bp->b_flags &= ~B_CACHE; 3508 bp->b_pages[bp->b_npages] = m; 3509 ++bp->b_npages; 3510 } 3511 3512 /* 3513 * Step 2. We've loaded the pages into the buffer, 3514 * we have to figure out if we can still have B_CACHE 3515 * set. Note that B_CACHE is set according to the 3516 * byte-granular range ( bcount and size ), new the 3517 * aligned range ( newbsize ). 3518 * 3519 * The VM test is against m->valid, which is DEV_BSIZE 3520 * aligned. Needless to say, the validity of the data 3521 * needs to also be DEV_BSIZE aligned. Note that this 3522 * fails with NFS if the server or some other client 3523 * extends the file's EOF. If our buffer is resized, 3524 * B_CACHE may remain set! XXX 3525 */ 3526 3527 toff = bp->b_bcount; 3528 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK); 3529 3530 while ((bp->b_flags & B_CACHE) && toff < size) { 3531 vm_pindex_t pi; 3532 3533 if (tinc > (size - toff)) 3534 tinc = size - toff; 3535 3536 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 3537 PAGE_SHIFT; 3538 3539 vfs_buf_test_cache( 3540 bp, 3541 bp->b_offset, 3542 toff, 3543 tinc, 3544 bp->b_pages[pi] 3545 ); 3546 toff += tinc; 3547 tinc = PAGE_SIZE; 3548 } 3549 VM_OBJECT_WUNLOCK(obj); 3550 3551 /* 3552 * Step 3, fixup the KVM pmap. 3553 */ 3554 if ((bp->b_flags & B_UNMAPPED) == 0) 3555 bpmap_qenter(bp); 3556 else 3557 BUF_CHECK_UNMAPPED(bp); 3558 } 3559 } 3560 if (newbsize < bp->b_bufsize) 3561 bufspacewakeup(); 3562 bp->b_bufsize = newbsize; /* actual buffer allocation */ 3563 bp->b_bcount = size; /* requested buffer size */ 3564 return 1; 3565 } 3566 3567 extern int inflight_transient_maps; 3568 3569 void 3570 biodone(struct bio *bp) 3571 { 3572 struct mtx *mtxp; 3573 void (*done)(struct bio *); 3574 vm_offset_t start, end; 3575 int transient; 3576 3577 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3578 mtx_lock(mtxp); 3579 bp->bio_flags |= BIO_DONE; 3580 if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) { 3581 start = trunc_page((vm_offset_t)bp->bio_data); 3582 end = round_page((vm_offset_t)bp->bio_data + bp->bio_length); 3583 transient = 1; 3584 } else { 3585 transient = 0; 3586 start = end = 0; 3587 } 3588 done = bp->bio_done; 3589 if (done == NULL) 3590 wakeup(bp); 3591 mtx_unlock(mtxp); 3592 if (done != NULL) 3593 done(bp); 3594 if (transient) { 3595 pmap_qremove(start, OFF_TO_IDX(end - start)); 3596 vm_map_remove(bio_transient_map, start, end); 3597 atomic_add_int(&inflight_transient_maps, -1); 3598 } 3599 } 3600 3601 /* 3602 * Wait for a BIO to finish. 3603 * 3604 * XXX: resort to a timeout for now. The optimal locking (if any) for this 3605 * case is not yet clear. 
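 *
 * biowait() pairs with biodone(): for a bio with no bio_done callback,
 * biodone() does a wakeup() on the bio and biowait() sleeps on the
 * same address.  The hz/10 timeout below bounds how long a lost
 * wakeup could stall the waiter.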
3606 */ 3607 int 3608 biowait(struct bio *bp, const char *wchan) 3609 { 3610 struct mtx *mtxp; 3611 3612 mtxp = mtx_pool_find(mtxpool_sleep, bp); 3613 mtx_lock(mtxp); 3614 while ((bp->bio_flags & BIO_DONE) == 0) 3615 msleep(bp, mtxp, PRIBIO, wchan, hz / 10); 3616 mtx_unlock(mtxp); 3617 if (bp->bio_error != 0) 3618 return (bp->bio_error); 3619 if (!(bp->bio_flags & BIO_ERROR)) 3620 return (0); 3621 return (EIO); 3622 } 3623 3624 void 3625 biofinish(struct bio *bp, struct devstat *stat, int error) 3626 { 3627 3628 if (error) { 3629 bp->bio_error = error; 3630 bp->bio_flags |= BIO_ERROR; 3631 } 3632 if (stat != NULL) 3633 devstat_end_transaction_bio(stat, bp); 3634 biodone(bp); 3635 } 3636 3637 /* 3638 * bufwait: 3639 * 3640 * Wait for buffer I/O completion, returning error status. The buffer 3641 * is left locked and B_DONE on return. B_EINTR is converted into an EINTR 3642 * error and cleared. 3643 */ 3644 int 3645 bufwait(struct buf *bp) 3646 { 3647 if (bp->b_iocmd == BIO_READ) 3648 bwait(bp, PRIBIO, "biord"); 3649 else 3650 bwait(bp, PRIBIO, "biowr"); 3651 if (bp->b_flags & B_EINTR) { 3652 bp->b_flags &= ~B_EINTR; 3653 return (EINTR); 3654 } 3655 if (bp->b_ioflags & BIO_ERROR) { 3656 return (bp->b_error ? bp->b_error : EIO); 3657 } else { 3658 return (0); 3659 } 3660 } 3661 3662 /* 3663 * Call back function from struct bio back up to struct buf. 3664 */ 3665 static void 3666 bufdonebio(struct bio *bip) 3667 { 3668 struct buf *bp; 3669 3670 bp = bip->bio_caller2; 3671 bp->b_resid = bp->b_bcount - bip->bio_completed; 3672 bp->b_resid = bip->bio_resid; /* XXX: remove */ 3673 bp->b_ioflags = bip->bio_flags; 3674 bp->b_error = bip->bio_error; 3675 if (bp->b_error) 3676 bp->b_ioflags |= BIO_ERROR; 3677 bufdone(bp); 3678 g_destroy_bio(bip); 3679 } 3680 3681 void 3682 dev_strategy(struct cdev *dev, struct buf *bp) 3683 { 3684 struct cdevsw *csw; 3685 int ref; 3686 3687 KASSERT(dev->si_refcount > 0, 3688 ("dev_strategy on un-referenced struct cdev *(%s) %p", 3689 devtoname(dev), dev)); 3690 3691 csw = dev_refthread(dev, &ref); 3692 dev_strategy_csw(dev, csw, bp); 3693 dev_relthread(dev, ref); 3694 } 3695 3696 void 3697 dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp) 3698 { 3699 struct bio *bip; 3700 3701 KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE, 3702 ("b_iocmd botch")); 3703 KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) || 3704 dev->si_threadcount > 0, 3705 ("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev), 3706 dev)); 3707 if (csw == NULL) { 3708 bp->b_error = ENXIO; 3709 bp->b_ioflags = BIO_ERROR; 3710 bufdone(bp); 3711 return; 3712 } 3713 for (;;) { 3714 bip = g_new_bio(); 3715 if (bip != NULL) 3716 break; 3717 /* Try again later */ 3718 tsleep(&bp, PRIBIO, "dev_strat", hz/10); 3719 } 3720 bip->bio_cmd = bp->b_iocmd; 3721 bip->bio_offset = bp->b_iooffset; 3722 bip->bio_length = bp->b_bcount; 3723 bip->bio_bcount = bp->b_bcount; /* XXX: remove */ 3724 bdata2bio(bp, bip); 3725 bip->bio_done = bufdonebio; 3726 bip->bio_caller2 = bp; 3727 bip->bio_dev = dev; 3728 (*csw->d_strategy)(bip); 3729 } 3730 3731 /* 3732 * bufdone: 3733 * 3734 * Finish I/O on a buffer, optionally calling a completion function. 3735 * This is usually called from an interrupt so process blocking is 3736 * not allowed. 3737 * 3738 * biodone is also responsible for setting B_CACHE in a B_VMIO bp. 3739 * In a non-VMIO bp, B_CACHE will be set on the next getblk() 3740 * assuming B_INVAL is clear. 
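 *
 * After completion processing, an asynchronous buffer (B_ASYNC) is
 * released by the completion path itself via brelse() or bqrelse();
 * a synchronous caller is only woken up through bdone() and remains
 * responsible for releasing the buffer, typically after bufwait().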
3741 * 3742 * For the VMIO case, we set B_CACHE if the op was a read and no 3743 * read error occured, or if the op was a write. B_CACHE is never 3744 * set if the buffer is invalid or otherwise uncacheable. 3745 * 3746 * biodone does not mess with B_INVAL, allowing the I/O routine or the 3747 * initiator to leave B_INVAL set to brelse the buffer out of existance 3748 * in the biodone routine. 3749 */ 3750 void 3751 bufdone(struct buf *bp) 3752 { 3753 struct bufobj *dropobj; 3754 void (*biodone)(struct buf *); 3755 3756 CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 3757 dropobj = NULL; 3758 3759 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp)); 3760 BUF_ASSERT_HELD(bp); 3761 3762 runningbufwakeup(bp); 3763 if (bp->b_iocmd == BIO_WRITE) 3764 dropobj = bp->b_bufobj; 3765 /* call optional completion function if requested */ 3766 if (bp->b_iodone != NULL) { 3767 biodone = bp->b_iodone; 3768 bp->b_iodone = NULL; 3769 (*biodone) (bp); 3770 if (dropobj) 3771 bufobj_wdrop(dropobj); 3772 return; 3773 } 3774 3775 bufdone_finish(bp); 3776 3777 if (dropobj) 3778 bufobj_wdrop(dropobj); 3779 } 3780 3781 void 3782 bufdone_finish(struct buf *bp) 3783 { 3784 BUF_ASSERT_HELD(bp); 3785 3786 if (!LIST_EMPTY(&bp->b_dep)) 3787 buf_complete(bp); 3788 3789 if (bp->b_flags & B_VMIO) { 3790 vm_ooffset_t foff; 3791 vm_page_t m; 3792 vm_object_t obj; 3793 struct vnode *vp; 3794 int bogus, i, iosize; 3795 3796 obj = bp->b_bufobj->bo_object; 3797 KASSERT(obj->paging_in_progress >= bp->b_npages, 3798 ("biodone_finish: paging in progress(%d) < b_npages(%d)", 3799 obj->paging_in_progress, bp->b_npages)); 3800 3801 vp = bp->b_vp; 3802 KASSERT(vp->v_holdcnt > 0, 3803 ("biodone_finish: vnode %p has zero hold count", vp)); 3804 KASSERT(vp->v_object != NULL, 3805 ("biodone_finish: vnode %p has no vm_object", vp)); 3806 3807 foff = bp->b_offset; 3808 KASSERT(bp->b_offset != NOOFFSET, 3809 ("biodone_finish: bp %p has no buffer offset", bp)); 3810 3811 /* 3812 * Set B_CACHE if the op was a normal read and no error 3813 * occured. B_CACHE is set for writes in the b*write() 3814 * routines. 3815 */ 3816 iosize = bp->b_bcount - bp->b_resid; 3817 if (bp->b_iocmd == BIO_READ && 3818 !(bp->b_flags & (B_INVAL|B_NOCACHE)) && 3819 !(bp->b_ioflags & BIO_ERROR)) { 3820 bp->b_flags |= B_CACHE; 3821 } 3822 bogus = 0; 3823 VM_OBJECT_WLOCK(obj); 3824 for (i = 0; i < bp->b_npages; i++) { 3825 int bogusflag = 0; 3826 int resid; 3827 3828 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; 3829 if (resid > iosize) 3830 resid = iosize; 3831 3832 /* 3833 * cleanup bogus pages, restoring the originals 3834 */ 3835 m = bp->b_pages[i]; 3836 if (m == bogus_page) { 3837 bogus = bogusflag = 1; 3838 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 3839 if (m == NULL) 3840 panic("biodone: page disappeared!"); 3841 bp->b_pages[i] = m; 3842 } 3843 KASSERT(OFF_TO_IDX(foff) == m->pindex, 3844 ("biodone_finish: foff(%jd)/pindex(%ju) mismatch", 3845 (intmax_t)foff, (uintmax_t)m->pindex)); 3846 3847 /* 3848 * In the write case, the valid and clean bits are 3849 * already changed correctly ( see bdwrite() ), so we 3850 * only need to do this here in the read case. 
3851 */ 3852 if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) { 3853 KASSERT((m->dirty & vm_page_bits(foff & 3854 PAGE_MASK, resid)) == 0, ("bufdone_finish:" 3855 " page %p has unexpected dirty bits", m)); 3856 vfs_page_set_valid(bp, foff, m); 3857 } 3858 3859 vm_page_io_finish(m); 3860 vm_object_pip_subtract(obj, 1); 3861 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3862 iosize -= resid; 3863 } 3864 vm_object_pip_wakeupn(obj, 0); 3865 VM_OBJECT_WUNLOCK(obj); 3866 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) { 3867 BUF_CHECK_MAPPED(bp); 3868 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3869 bp->b_pages, bp->b_npages); 3870 } 3871 } 3872 3873 /* 3874 * For asynchronous completions, release the buffer now. The brelse 3875 * will do a wakeup there if necessary - so no need to do a wakeup 3876 * here in the async case. The sync case always needs to do a wakeup. 3877 */ 3878 3879 if (bp->b_flags & B_ASYNC) { 3880 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR)) 3881 brelse(bp); 3882 else 3883 bqrelse(bp); 3884 } else 3885 bdone(bp); 3886 } 3887 3888 /* 3889 * This routine is called in lieu of iodone in the case of 3890 * incomplete I/O. This keeps the busy status for pages 3891 * consistent. 3892 */ 3893 void 3894 vfs_unbusy_pages(struct buf *bp) 3895 { 3896 int i; 3897 vm_object_t obj; 3898 vm_page_t m; 3899 3900 runningbufwakeup(bp); 3901 if (!(bp->b_flags & B_VMIO)) 3902 return; 3903 3904 obj = bp->b_bufobj->bo_object; 3905 VM_OBJECT_WLOCK(obj); 3906 for (i = 0; i < bp->b_npages; i++) { 3907 m = bp->b_pages[i]; 3908 if (m == bogus_page) { 3909 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 3910 if (!m) 3911 panic("vfs_unbusy_pages: page missing\n"); 3912 bp->b_pages[i] = m; 3913 if ((bp->b_flags & B_UNMAPPED) == 0) { 3914 BUF_CHECK_MAPPED(bp); 3915 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3916 bp->b_pages, bp->b_npages); 3917 } else 3918 BUF_CHECK_UNMAPPED(bp); 3919 } 3920 vm_object_pip_subtract(obj, 1); 3921 vm_page_io_finish(m); 3922 } 3923 vm_object_pip_wakeupn(obj, 0); 3924 VM_OBJECT_WUNLOCK(obj); 3925 } 3926 3927 /* 3928 * vfs_page_set_valid: 3929 * 3930 * Set the valid bits in a page based on the supplied offset. The 3931 * range is restricted to the buffer's size. 3932 * 3933 * This routine is typically called after a read completes. 3934 */ 3935 static void 3936 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m) 3937 { 3938 vm_ooffset_t eoff; 3939 3940 /* 3941 * Compute the end offset, eoff, such that [off, eoff) does not span a 3942 * page boundary and eoff is not greater than the end of the buffer. 3943 * The end of the buffer, in this case, is our file EOF, not the 3944 * allocation size of the buffer. 3945 */ 3946 eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK; 3947 if (eoff > bp->b_offset + bp->b_bcount) 3948 eoff = bp->b_offset + bp->b_bcount; 3949 3950 /* 3951 * Set valid range. This is typically the entire buffer and thus the 3952 * entire page. 3953 */ 3954 if (eoff > off) 3955 vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off); 3956 } 3957 3958 /* 3959 * vfs_page_set_validclean: 3960 * 3961 * Set the valid bits and clear the dirty bits in a page based on the 3962 * supplied offset. The range is restricted to the buffer's size. 3963 */ 3964 static void 3965 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m) 3966 { 3967 vm_ooffset_t soff, eoff; 3968 3969 /* 3970 * Start and end offsets in buffer.
eoff - soff may not cross a 3971 * page boundary or cross the end of the buffer. The end of the 3972 * buffer, in this case, is our file EOF, not the allocation size 3973 * of the buffer. 3974 */ 3975 soff = off; 3976 eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3977 if (eoff > bp->b_offset + bp->b_bcount) 3978 eoff = bp->b_offset + bp->b_bcount; 3979 3980 /* 3981 * Set valid range. This is typically the entire buffer and thus the 3982 * entire page. 3983 */ 3984 if (eoff > soff) { 3985 vm_page_set_validclean( 3986 m, 3987 (vm_offset_t) (soff & PAGE_MASK), 3988 (vm_offset_t) (eoff - soff) 3989 ); 3990 } 3991 } 3992 3993 /* 3994 * Ensure that all buffer pages are not busied by the VPO_BUSY flag. If 3995 * any page is busy, drain the flag. 3996 */ 3997 static void 3998 vfs_drain_busy_pages(struct buf *bp) 3999 { 4000 vm_page_t m; 4001 int i, last_busied; 4002 4003 VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object); 4004 last_busied = 0; 4005 for (i = 0; i < bp->b_npages; i++) { 4006 m = bp->b_pages[i]; 4007 if ((m->oflags & VPO_BUSY) != 0) { 4008 for (; last_busied < i; last_busied++) 4009 vm_page_busy(bp->b_pages[last_busied]); 4010 while ((m->oflags & VPO_BUSY) != 0) 4011 vm_page_sleep(m, "vbpage"); 4012 } 4013 } 4014 for (i = 0; i < last_busied; i++) 4015 vm_page_wakeup(bp->b_pages[i]); 4016 } 4017 4018 /* 4019 * This routine is called before a device strategy routine. 4020 * It is used to tell the VM system that paging I/O is in 4021 * progress, and treat the pages associated with the buffer 4022 * almost as being VPO_BUSY. Also the object paging_in_progress 4023 * flag is handled to make sure that the object doesn't become 4024 * inconsistent. 4025 * 4026 * Since I/O has not been initiated yet, certain buffer flags 4027 * such as BIO_ERROR or B_INVAL may be in an inconsistent state 4028 * and should be ignored. 4029 */ 4030 void 4031 vfs_busy_pages(struct buf *bp, int clear_modify) 4032 { 4033 int i, bogus; 4034 vm_object_t obj; 4035 vm_ooffset_t foff; 4036 vm_page_t m; 4037 4038 if (!(bp->b_flags & B_VMIO)) 4039 return; 4040 4041 obj = bp->b_bufobj->bo_object; 4042 foff = bp->b_offset; 4043 KASSERT(bp->b_offset != NOOFFSET, 4044 ("vfs_busy_pages: no buffer offset")); 4045 VM_OBJECT_WLOCK(obj); 4046 vfs_drain_busy_pages(bp); 4047 if (bp->b_bufsize != 0) 4048 vfs_setdirty_locked_object(bp); 4049 bogus = 0; 4050 for (i = 0; i < bp->b_npages; i++) { 4051 m = bp->b_pages[i]; 4052 4053 if ((bp->b_flags & B_CLUSTER) == 0) { 4054 vm_object_pip_add(obj, 1); 4055 vm_page_io_start(m); 4056 } 4057 /* 4058 * When readying a buffer for a read ( i.e. 4059 * clear_modify == 0 ), it is important to do 4060 * bogus_page replacement for valid pages in 4061 * partially instantiated buffers. Partially 4062 * instantiated buffers can, in turn, occur when 4063 * reconstituting a buffer from its VM backing store 4064 * base. We only have to do this if B_CACHE is 4065 * clear ( which causes the I/O to occur in the 4066 * first place ). The replacement prevents the read 4067 * I/O from overwriting potentially dirty VM-backed 4068 * pages. XXX bogus page replacement is, uh, bogus. 4069 * It may not work properly with small-block devices. 4070 * We need to find a better way.
4071 */ 4072 if (clear_modify) { 4073 pmap_remove_write(m); 4074 vfs_page_set_validclean(bp, foff, m); 4075 } else if (m->valid == VM_PAGE_BITS_ALL && 4076 (bp->b_flags & B_CACHE) == 0) { 4077 bp->b_pages[i] = bogus_page; 4078 bogus++; 4079 } 4080 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 4081 } 4082 VM_OBJECT_WUNLOCK(obj); 4083 if (bogus && (bp->b_flags & B_UNMAPPED) == 0) { 4084 BUF_CHECK_MAPPED(bp); 4085 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 4086 bp->b_pages, bp->b_npages); 4087 } 4088 } 4089 4090 /* 4091 * vfs_bio_set_valid: 4092 * 4093 * Set the range within the buffer to valid. The range is 4094 * relative to the beginning of the buffer, b_offset. Note that 4095 * b_offset itself may be offset from the beginning of the first 4096 * page. 4097 */ 4098 void 4099 vfs_bio_set_valid(struct buf *bp, int base, int size) 4100 { 4101 int i, n; 4102 vm_page_t m; 4103 4104 if (!(bp->b_flags & B_VMIO)) 4105 return; 4106 4107 /* 4108 * Fixup base to be relative to beginning of first page. 4109 * Set initial n to be the maximum number of bytes in the 4110 * first page that can be validated. 4111 */ 4112 base += (bp->b_offset & PAGE_MASK); 4113 n = PAGE_SIZE - (base & PAGE_MASK); 4114 4115 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 4116 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { 4117 m = bp->b_pages[i]; 4118 if (n > size) 4119 n = size; 4120 vm_page_set_valid_range(m, base & PAGE_MASK, n); 4121 base += n; 4122 size -= n; 4123 n = PAGE_SIZE; 4124 } 4125 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 4126 } 4127 4128 /* 4129 * vfs_bio_clrbuf: 4130 * 4131 * If the specified buffer is a non-VMIO buffer, clear the entire 4132 * buffer. If the specified buffer is a VMIO buffer, clear and 4133 * validate only the previously invalid portions of the buffer. 4134 * This routine essentially fakes an I/O, so we need to clear 4135 * BIO_ERROR and B_INVAL. 4136 * 4137 * Note that while we only theoretically need to clear through b_bcount, 4138 * we go ahead and clear through b_bufsize. 
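 * For example, with DEV_BSIZE == 512, a page-aligned 2048-byte buffer is handled by the single-page case below with mask = (1 << (2048 / 512)) - 1 == 0x0f: if none of those four 512-byte blocks is valid the range is zeroed and marked valid, and if all of them are valid nothing needs to be done.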
4139 */ 4140 void 4141 vfs_bio_clrbuf(struct buf *bp) 4142 { 4143 int i, j, mask, sa, ea, slide; 4144 4145 if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) { 4146 clrbuf(bp); 4147 return; 4148 } 4149 bp->b_flags &= ~B_INVAL; 4150 bp->b_ioflags &= ~BIO_ERROR; 4151 VM_OBJECT_WLOCK(bp->b_bufobj->bo_object); 4152 if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && 4153 (bp->b_offset & PAGE_MASK) == 0) { 4154 if (bp->b_pages[0] == bogus_page) 4155 goto unlock; 4156 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; 4157 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object); 4158 if ((bp->b_pages[0]->valid & mask) == mask) 4159 goto unlock; 4160 if ((bp->b_pages[0]->valid & mask) == 0) { 4161 pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize); 4162 bp->b_pages[0]->valid |= mask; 4163 goto unlock; 4164 } 4165 } 4166 sa = bp->b_offset & PAGE_MASK; 4167 slide = 0; 4168 for (i = 0; i < bp->b_npages; i++, sa = 0) { 4169 slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize); 4170 ea = slide & PAGE_MASK; 4171 if (ea == 0) 4172 ea = PAGE_SIZE; 4173 if (bp->b_pages[i] == bogus_page) 4174 continue; 4175 j = sa / DEV_BSIZE; 4176 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; 4177 VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object); 4178 if ((bp->b_pages[i]->valid & mask) == mask) 4179 continue; 4180 if ((bp->b_pages[i]->valid & mask) == 0) 4181 pmap_zero_page_area(bp->b_pages[i], sa, ea - sa); 4182 else { 4183 for (; sa < ea; sa += DEV_BSIZE, j++) { 4184 if ((bp->b_pages[i]->valid & (1 << j)) == 0) { 4185 pmap_zero_page_area(bp->b_pages[i], 4186 sa, DEV_BSIZE); 4187 } 4188 } 4189 } 4190 bp->b_pages[i]->valid |= mask; 4191 } 4192 unlock: 4193 VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object); 4194 bp->b_resid = 0; 4195 } 4196 4197 void 4198 vfs_bio_bzero_buf(struct buf *bp, int base, int size) 4199 { 4200 vm_page_t m; 4201 int i, n; 4202 4203 if ((bp->b_flags & B_UNMAPPED) == 0) { 4204 BUF_CHECK_MAPPED(bp); 4205 bzero(bp->b_data + base, size); 4206 } else { 4207 BUF_CHECK_UNMAPPED(bp); 4208 n = PAGE_SIZE - (base & PAGE_MASK); 4209 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { 4210 m = bp->b_pages[i]; 4211 if (n > size) 4212 n = size; 4213 pmap_zero_page_area(m, base & PAGE_MASK, n); 4214 base += n; 4215 size -= n; 4216 n = PAGE_SIZE; 4217 } 4218 } 4219 } 4220 4221 /* 4222 * vm_hold_load_pages and vm_hold_free_pages get pages into 4223 * a buffer's address space. The pages are anonymous and are 4224 * not associated with a file object. 4225 */ 4226 static void 4227 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) 4228 { 4229 vm_offset_t pg; 4230 vm_page_t p; 4231 int index; 4232 4233 BUF_CHECK_MAPPED(bp); 4234 4235 to = round_page(to); 4236 from = round_page(from); 4237 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 4238 4239 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 4240 tryagain: 4241 /* 4242 * note: must allocate system pages since blocking here 4243 * could interfere with paging I/O, no matter which 4244 * process we are.
4245 */ 4246 p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ | 4247 VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT)); 4248 if (p == NULL) { 4249 VM_WAIT; 4250 goto tryagain; 4251 } 4252 pmap_qenter(pg, &p, 1); 4253 bp->b_pages[index] = p; 4254 } 4255 bp->b_npages = index; 4256 } 4257 4258 /* Return pages associated with this buf to the vm system */ 4259 static void 4260 vm_hold_free_pages(struct buf *bp, int newbsize) 4261 { 4262 vm_offset_t from; 4263 vm_page_t p; 4264 int index, newnpages; 4265 4266 BUF_CHECK_MAPPED(bp); 4267 4268 from = round_page((vm_offset_t)bp->b_data + newbsize); 4269 newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 4270 if (bp->b_npages > newnpages) 4271 pmap_qremove(from, bp->b_npages - newnpages); 4272 for (index = newnpages; index < bp->b_npages; index++) { 4273 p = bp->b_pages[index]; 4274 bp->b_pages[index] = NULL; 4275 if (p->busy != 0) 4276 printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n", 4277 (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno); 4278 p->wire_count--; 4279 vm_page_free(p); 4280 atomic_subtract_int(&cnt.v_wire_count, 1); 4281 } 4282 bp->b_npages = newnpages; 4283 } 4284 4285 /* 4286 * Map an IO request into kernel virtual address space. 4287 * 4288 * All requests are (re)mapped into kernel VA space. 4289 * Notice that we use b_bufsize for the size of the buffer 4290 * to be mapped. b_bcount might be modified by the driver. 4291 * 4292 * Note that even if the caller determines that the address space should 4293 * be valid, a race or a smaller-file mapped into a larger space may 4294 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST 4295 * check the return value. 4296 */ 4297 int 4298 vmapbuf(struct buf *bp, int mapbuf) 4299 { 4300 caddr_t kva; 4301 vm_prot_t prot; 4302 int pidx; 4303 4304 if (bp->b_bufsize < 0) 4305 return (-1); 4306 prot = VM_PROT_READ; 4307 if (bp->b_iocmd == BIO_READ) 4308 prot |= VM_PROT_WRITE; /* Less backwards than it looks */ 4309 if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, 4310 (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages, 4311 btoc(MAXPHYS))) < 0) 4312 return (-1); 4313 bp->b_npages = pidx; 4314 if (mapbuf || !unmapped_buf_allowed) { 4315 pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx); 4316 kva = bp->b_saveaddr; 4317 bp->b_saveaddr = bp->b_data; 4318 bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK); 4319 bp->b_flags &= ~B_UNMAPPED; 4320 } else { 4321 bp->b_flags |= B_UNMAPPED; 4322 bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK; 4323 bp->b_saveaddr = bp->b_data; 4324 bp->b_data = unmapped_buf; 4325 } 4326 return(0); 4327 } 4328 4329 /* 4330 * Free the io map PTEs associated with this IO operation. 4331 * We also invalidate the TLB entries and restore the original b_addr. 
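 * For a B_UNMAPPED buffer vmapbuf() entered no PTEs, so only the page hold references are released, the flag is cleared and b_data is restored from b_saveaddr.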
4332 */ 4333 void 4334 vunmapbuf(struct buf *bp) 4335 { 4336 int npages; 4337 4338 npages = bp->b_npages; 4339 if (bp->b_flags & B_UNMAPPED) 4340 bp->b_flags &= ~B_UNMAPPED; 4341 else 4342 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); 4343 vm_page_unhold_pages(bp->b_pages, npages); 4344 4345 bp->b_data = bp->b_saveaddr; 4346 } 4347 4348 void 4349 bdone(struct buf *bp) 4350 { 4351 struct mtx *mtxp; 4352 4353 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4354 mtx_lock(mtxp); 4355 bp->b_flags |= B_DONE; 4356 wakeup(bp); 4357 mtx_unlock(mtxp); 4358 } 4359 4360 void 4361 bwait(struct buf *bp, u_char pri, const char *wchan) 4362 { 4363 struct mtx *mtxp; 4364 4365 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4366 mtx_lock(mtxp); 4367 while ((bp->b_flags & B_DONE) == 0) 4368 msleep(bp, mtxp, pri, wchan, 0); 4369 mtx_unlock(mtxp); 4370 } 4371 4372 int 4373 bufsync(struct bufobj *bo, int waitfor) 4374 { 4375 4376 return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread)); 4377 } 4378 4379 void 4380 bufstrategy(struct bufobj *bo, struct buf *bp) 4381 { 4382 int i = 0; 4383 struct vnode *vp; 4384 4385 vp = bp->b_vp; 4386 KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy")); 4387 KASSERT(vp->v_type != VCHR && vp->v_type != VBLK, 4388 ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp)); 4389 i = VOP_STRATEGY(vp, bp); 4390 KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp)); 4391 } 4392 4393 void 4394 bufobj_wrefl(struct bufobj *bo) 4395 { 4396 4397 KASSERT(bo != NULL, ("NULL bo in bufobj_wref")); 4398 ASSERT_BO_WLOCKED(bo); 4399 bo->bo_numoutput++; 4400 } 4401 4402 void 4403 bufobj_wref(struct bufobj *bo) 4404 { 4405 4406 KASSERT(bo != NULL, ("NULL bo in bufobj_wref")); 4407 BO_LOCK(bo); 4408 bo->bo_numoutput++; 4409 BO_UNLOCK(bo); 4410 } 4411 4412 void 4413 bufobj_wdrop(struct bufobj *bo) 4414 { 4415 4416 KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop")); 4417 BO_LOCK(bo); 4418 KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count")); 4419 if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) { 4420 bo->bo_flag &= ~BO_WWAIT; 4421 wakeup(&bo->bo_numoutput); 4422 } 4423 BO_UNLOCK(bo); 4424 } 4425 4426 int 4427 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo) 4428 { 4429 int error; 4430 4431 KASSERT(bo != NULL, ("NULL bo in bufobj_wwait")); 4432 ASSERT_BO_WLOCKED(bo); 4433 error = 0; 4434 while (bo->bo_numoutput) { 4435 bo->bo_flag |= BO_WWAIT; 4436 error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo), 4437 slpflag | (PRIBIO + 1), "bo_wwait", timeo); 4438 if (error) 4439 break; 4440 } 4441 return (error); 4442 } 4443 4444 void 4445 bpin(struct buf *bp) 4446 { 4447 struct mtx *mtxp; 4448 4449 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4450 mtx_lock(mtxp); 4451 bp->b_pin_count++; 4452 mtx_unlock(mtxp); 4453 } 4454 4455 void 4456 bunpin(struct buf *bp) 4457 { 4458 struct mtx *mtxp; 4459 4460 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4461 mtx_lock(mtxp); 4462 if (--bp->b_pin_count == 0) 4463 wakeup(bp); 4464 mtx_unlock(mtxp); 4465 } 4466 4467 void 4468 bunpin_wait(struct buf *bp) 4469 { 4470 struct mtx *mtxp; 4471 4472 mtxp = mtx_pool_find(mtxpool_sleep, bp); 4473 mtx_lock(mtxp); 4474 while (bp->b_pin_count > 0) 4475 msleep(bp, mtxp, PRIBIO, "bwunpin", 0); 4476 mtx_unlock(mtxp); 4477 } 4478 4479 /* 4480 * Set bio_data or bio_ma for struct bio from the struct buf. 
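 * For an unmapped buffer (B_UNMAPPED) the pages are handed over through bio_ma/bio_ma_n with BIO_UNMAPPED set and bio_data pointing at unmapped_buf; otherwise bio_data simply aliases the mapped b_data and bio_ma is left NULL.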
4481 */ 4482 void 4483 bdata2bio(struct buf *bp, struct bio *bip) 4484 { 4485 4486 if ((bp->b_flags & B_UNMAPPED) != 0) { 4487 KASSERT(unmapped_buf_allowed, ("unmapped")); 4488 bip->bio_ma = bp->b_pages; 4489 bip->bio_ma_n = bp->b_npages; 4490 bip->bio_data = unmapped_buf; 4491 bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK; 4492 bip->bio_flags |= BIO_UNMAPPED; 4493 KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) / 4494 PAGE_SIZE == bp->b_npages, 4495 ("Buffer %p too short: %d %d %d", bp, bip->bio_ma_offset, 4496 bip->bio_length, bip->bio_ma_n)); 4497 } else { 4498 bip->bio_data = bp->b_data; 4499 bip->bio_ma = NULL; 4500 } 4501 } 4502 4503 #include "opt_ddb.h" 4504 #ifdef DDB 4505 #include <ddb/ddb.h> 4506 4507 /* DDB command to show buffer data */ 4508 DB_SHOW_COMMAND(buffer, db_show_buffer) 4509 { 4510 /* get args */ 4511 struct buf *bp = (struct buf *)addr; 4512 4513 if (!have_addr) { 4514 db_printf("usage: show buffer <addr>\n"); 4515 return; 4516 } 4517 4518 db_printf("buf at %p\n", bp); 4519 db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n", 4520 (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags, 4521 PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS); 4522 db_printf( 4523 "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n" 4524 "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, " 4525 "b_dep = %p\n", 4526 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 4527 bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno, 4528 (intmax_t)bp->b_lblkno, bp->b_dep.lh_first); 4529 if (bp->b_npages) { 4530 int i; 4531 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 4532 for (i = 0; i < bp->b_npages; i++) { 4533 vm_page_t m; 4534 m = bp->b_pages[i]; 4535 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 4536 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 4537 if ((i + 1) < bp->b_npages) 4538 db_printf(","); 4539 } 4540 db_printf("\n"); 4541 } 4542 db_printf(" "); 4543 BUF_LOCKPRINTINFO(bp); 4544 } 4545 4546 DB_SHOW_COMMAND(lockedbufs, lockedbufs) 4547 { 4548 struct buf *bp; 4549 int i; 4550 4551 for (i = 0; i < nbuf; i++) { 4552 bp = &buf[i]; 4553 if (BUF_ISLOCKED(bp)) { 4554 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4555 db_printf("\n"); 4556 } 4557 } 4558 } 4559 4560 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs) 4561 { 4562 struct vnode *vp; 4563 struct buf *bp; 4564 4565 if (!have_addr) { 4566 db_printf("usage: show vnodebufs <addr>\n"); 4567 return; 4568 } 4569 vp = (struct vnode *)addr; 4570 db_printf("Clean buffers:\n"); 4571 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) { 4572 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4573 db_printf("\n"); 4574 } 4575 db_printf("Dirty buffers:\n"); 4576 TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) { 4577 db_show_buffer((uintptr_t)bp, 1, 0, NULL); 4578 db_printf("\n"); 4579 } 4580 } 4581 4582 DB_COMMAND(countfreebufs, db_coundfreebufs) 4583 { 4584 struct buf *bp; 4585 int i, used = 0, nfree = 0; 4586 4587 if (have_addr) { 4588 db_printf("usage: countfreebufs\n"); 4589 return; 4590 } 4591 4592 for (i = 0; i < nbuf; i++) { 4593 bp = &buf[i]; 4594 if ((bp->b_flags & B_INFREECNT) != 0) 4595 nfree++; 4596 else 4597 used++; 4598 } 4599 4600 db_printf("Counted %d free, %d used (%d tot)\n", nfree, used, 4601 nfree + used); 4602 db_printf("numfreebuffers is %d\n", numfreebuffers); 4603 } 4604 #endif /* DDB */ 4605
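/*
 * Illustrative ddb session exercising the commands defined above; this is
 * an example only and the addresses shown are placeholders, not real
 * values:
 *
 *	db> show buffer 0xfffff80001234000
 *	db> show lockedbufs
 *	db> show vnodebufs 0xfffff80002345000
 *	db> countfreebufs
 */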