/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * Copyright (c) 1994,1997 John S. Dyson
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 *
 * see man buf(9) for more info.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>
#include <geom/geom.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/swap_pager.h>
#include "opt_compat.h"
#include "opt_swap.h"

static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");

struct bio_ops bioops;		/* I/O operation notification */

struct buf_ops buf_ops_bio = {
	.bop_name	=	"buf_ops_bio",
	.bop_write	=	bufwrite,
	.bop_strategy	=	bufstrategy,
	.bop_sync	=	bufsync,
	.bop_bdflush	=	bufbdflush,
};

static struct buf *buf;		/* buffer header pool */
extern struct buf *swbuf;	/* Swap buffer header pool. */
caddr_t unmapped_buf;

/* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
struct proc *bufdaemonproc;

static int inmem(struct vnode *vp, daddr_t blkno);
static void vm_hold_free_pages(struct buf *bp, int newbsize);
static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
		vm_page_t m);
static void vfs_clean_pages_dirty_buf(struct buf *bp);
static void vfs_setdirty_locked_object(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static int vfs_bio_clcheck(struct vnode *vp, int size,
		daddr_t lblkno, daddr_t blkno);
static int buf_flush(struct vnode *vp, int);
static int flushbufqueues(struct vnode *, int, int);
static void buf_daemon(void);
static void bremfreel(struct buf *bp);
static __inline void bd_wakeup(void);
static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
#endif

int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
    "Use the VM system for directory writes");
long runningbufspace;
SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
    "Amount of presently outstanding async buffer io");
static long bufspace;
#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
    &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
#else
SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
    "Physical memory used for buffers");
#endif
static long bufkvaspace;
SYSCTL_LONG(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace, 0,
    "Kernel virtual memory used for buffers");
static long maxbufspace;
SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
    "Maximum allowed value of bufspace (including buf_daemon)");
static long bufmallocspace;
SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
    "Amount of malloced memory for buffers");
static long maxbufmallocspace;
SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
    0, "Maximum amount of malloced memory for buffers");
static long lobufspace;
SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
    "Minimum amount of buffers we want to have");
long hibufspace;
SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
    "Maximum allowed value of bufspace (excluding buf_daemon)");
static int bufreusecnt;
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
    "Number of times we have reused a buffer");
static int buffreekvacnt;
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
    "Number of times we have freed the KVA space from some buffer");
static int bufdefragcnt;
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
    "Number of times we have had to repeat buffer allocation to defragment");
static long lorunningspace;
SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
    CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
    "Minimum preferred space used for in-progress I/O");
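/*
 * Illustrative tuning note (not code from this file): lorunningspace and
 * hirunningspace are runtime tunable, e.g. "sysctl vfs.hirunningspace=16777216";
 * sysctl_runningspace() below rejects settings that would invert the pair
 * (lo > hi) with EINVAL.
 */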
static long hirunningspace;
SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
    CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
    "Maximum amount of space to use for in-progress I/O");
int dirtybufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
int bdwriteskip;
SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
int altbufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
    0, "Number of fsync flushes to limit dirty buffers");
static int recursiveflushes;
SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
    0, "Number of flushes skipped due to being recursive");
static int numdirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
    "Number of buffers that are dirty (have unwritten changes) at the moment");
static int lodirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
    "How many buffers we want to have free before bufdaemon can sleep");
static int hidirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
    "When the number of dirty buffers is considered severe");
int dirtybufthresh;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
static int numfreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
    "Number of free buffers");
static int lofreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
    "XXX Unused");
static int hifreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
    "XXX Complicatedly unused");
static int getnewbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
    "Number of calls to getnewbuf");
static int getnewbufrestarts;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts,
    0, "Number of times getnewbuf has had to restart a buffer acquisition");
static int mappingrestarts;
SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
    "Number of times getblk has had to restart a buffer mapping for "
    "unmapped buffer");
static int flushbufqtarget = 100;
SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
    "Amount of work to do in flushbufqueues when helping bufdaemon");
static long notbufdflushes;
SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0,
    "Number of dirty buffer flushes done by the bufdaemon helpers");
static long barrierwrites;
SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
    "Number of barrier writes");
SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
    &unmapped_buf_allowed, 0,
    "Permit the use of the unmapped i/o");

/*
 * Lock for the non-dirty bufqueues
 */
static struct mtx_padalign bqclean;

/*
 * Lock for the dirty queue.
 */
static struct mtx_padalign bqdirty;

/*
 * This lock synchronizes access to bd_request.
 */
static struct mtx_padalign bdlock;

/*
 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
 * waitrunningbufspace().
 */
static struct mtx_padalign rbreqlock;

/*
 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
 */
static struct rwlock_padalign nblock;

/*
 * Lock that protects bdirtywait.
 */
static struct mtx_padalign bdirtylock;

/*
 * Wakeup point for bufdaemon, as well as indicator of whether it is already
 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
 * is idling.
 */
static int bd_request;

/*
 * Request for the buf daemon to write more buffers than is indicated by
 * lodirtybuffers.  This may be necessary to push out excess dependencies or
 * defragment the address space where a simple count of the number of dirty
 * buffers is insufficient to characterize the demand for flushing them.
 */
static int bd_speedupreq;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * Synchronization (sleep/wakeup) variable for active buffer space requests.
 * Set when wait starts, cleared prior to wakeup().
 * Used in runningbufwakeup() and waitrunningbufspace().
 */
static int runningbufreq;

/*
 * Synchronization (sleep/wakeup) variable for buffer requests.
 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
 * by and/or.
 * Used in numdirtywakeup(), bufspacewakeup(), bufcountadd(), bwillwrite(),
 * getnewbuf(), and getblk().
 */
static volatile int needsbuffer;

/*
 * Synchronization for bwillwrite() waiters.
 */
static int bdirtywait;

/*
 * Definitions for the buffer free lists.
 */
#define BUFFER_QUEUES	4	/* number of free buffer queues */

#define QUEUE_NONE	0	/* on no queue */
#define QUEUE_CLEAN	1	/* non-B_DELWRI buffers */
#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
#define QUEUE_EMPTY	3	/* empty buffer headers */
#define QUEUE_SENTINEL	1024	/* not a queue index, but mark for sentinel */

/* Queues for free buffers with various properties */
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
#ifdef INVARIANTS
static int bq_len[BUFFER_QUEUES];
#endif

/*
 * Single global constant for BUF_WMESG, to avoid getting multiple references.
 * buf_wmesg is referenced from macros.
 */
const char *buf_wmesg = BUF_WMESG;
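/*
 * Sketch of the needsbuffer protocol (a summary of the code below, not an
 * addition to it): waiters such as getnewbuf() set one of the VFS_BIO_NEED
 * flags in needsbuffer and sleep under the nblock lock; the wakeup side
 * (bufspacewakeup(), bufcountadd()) clears the relevant bits with an
 * atomic_cmpset loop while holding nblock read-locked, and calls wakeup()
 * only if a bit was actually set.
 */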
#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */

static int
sysctl_runningspace(SYSCTL_HANDLER_ARGS)
{
	long value;
	int error;

	value = *(long *)arg1;
	error = sysctl_handle_long(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	mtx_lock(&rbreqlock);
	if (arg1 == &hirunningspace) {
		if (value < lorunningspace)
			error = EINVAL;
		else
			hirunningspace = value;
	} else {
		KASSERT(arg1 == &lorunningspace,
		    ("%s: unknown arg1", __func__));
		if (value > hirunningspace)
			error = EINVAL;
		else
			lorunningspace = value;
	}
	mtx_unlock(&rbreqlock);
	return (error);
}

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
static int
sysctl_bufspace(SYSCTL_HANDLER_ARGS)
{
	long lvalue;
	int ivalue;

	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
		return (sysctl_handle_long(oidp, arg1, arg2, req));
	lvalue = *(long *)arg1;
	if (lvalue > INT_MAX)
		/* On overflow, still write out a long to trigger ENOMEM. */
		return (sysctl_handle_long(oidp, &lvalue, 0, req));
	ivalue = lvalue;
	return (sysctl_handle_int(oidp, &ivalue, 0, req));
}
#endif

/*
 * bqlock:
 *
 *	Return the appropriate queue lock based on the index.
 */
static inline struct mtx *
bqlock(int qindex)
{

	if (qindex == QUEUE_DIRTY)
		return (struct mtx *)(&bqdirty);
	return (struct mtx *)(&bqclean);
}

/*
 * bdirtywakeup:
 *
 *	Wakeup any bwillwrite() waiters.
 */
static void
bdirtywakeup(void)
{
	mtx_lock(&bdirtylock);
	if (bdirtywait) {
		bdirtywait = 0;
		wakeup(&bdirtywait);
	}
	mtx_unlock(&bdirtylock);
}

/*
 * bdirtysub:
 *
 *	Decrement the numdirtybuffers count by one and wakeup any
 *	threads blocked in bwillwrite().
 */
static void
bdirtysub(void)
{

	if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
	    (lodirtybuffers + hidirtybuffers) / 2)
		bdirtywakeup();
}

/*
 * bdirtyadd:
 *
 *	Increment the numdirtybuffers count by one and wakeup the buf
 *	daemon if needed.
 */
static void
bdirtyadd(void)
{

	/*
	 * Only do the wakeup once as we cross the boundary.  The
	 * buf daemon will keep running until the condition clears.
	 */
	if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
	    (lodirtybuffers + hidirtybuffers) / 2)
		bd_wakeup();
}
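/*
 * Illustrative arithmetic (assumes the defaults computed in bufinit(),
 * where lodirtybuffers == hidirtybuffers / 2): the wakeup threshold
 * (lodirtybuffers + hidirtybuffers) / 2 used by bdirtysub() and
 * bdirtyadd() then sits at 3/4 of hidirtybuffers; e.g. with
 * hidirtybuffers == 1000 and lodirtybuffers == 500, the single wakeup
 * fires as numdirtybuffers crosses 750 in either direction.
 */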
/*
 * bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.  Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */
static __inline void
bufspacewakeup(void)
{
	int need_wakeup, on;

	/*
	 * If someone is waiting for bufspace, wake them up.  Even
	 * though we may not have freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	rw_rlock(&nblock);
	for (;;) {
		need_wakeup = 0;
		on = needsbuffer;
		if ((on & VFS_BIO_NEED_BUFSPACE) == 0)
			break;
		need_wakeup = 1;
		if (atomic_cmpset_rel_int(&needsbuffer, on,
		    on & ~VFS_BIO_NEED_BUFSPACE))
			break;
	}
	if (need_wakeup)
		wakeup(__DEVOLATILE(void *, &needsbuffer));
	rw_runlock(&nblock);
}

/*
 * bufspaceadjust:
 *
 *	Adjust the reported bufspace for a KVA managed buffer, possibly
 *	waking any waiters.
 */
static void
bufspaceadjust(struct buf *bp, int bufsize)
{
	int diff;

	KASSERT((bp->b_flags & B_MALLOC) == 0,
	    ("bufspaceadjust: malloc buf %p", bp));
	diff = bufsize - bp->b_bufsize;
	if (diff < 0) {
		atomic_subtract_long(&bufspace, -diff);
		bufspacewakeup();
	} else
		atomic_add_long(&bufspace, diff);
	bp->b_bufsize = bufsize;
}

/*
 * bufmallocadjust:
 *
 *	Adjust the reported bufspace for a malloc managed buffer, possibly
 *	waking any waiters.
 */
static void
bufmallocadjust(struct buf *bp, int bufsize)
{
	int diff;

	KASSERT((bp->b_flags & B_MALLOC) != 0,
	    ("bufmallocadjust: non-malloc buf %p", bp));
	diff = bufsize - bp->b_bufsize;
	if (diff < 0) {
		atomic_subtract_long(&bufmallocspace, -diff);
		bufspacewakeup();
	} else
		atomic_add_long(&bufmallocspace, diff);
	bp->b_bufsize = bufsize;
}

/*
 * runningwakeup:
 *
 *	Wake up processes that are waiting on asynchronous writes to fall
 *	below lorunningspace.
 */
static void
runningwakeup(void)
{

	mtx_lock(&rbreqlock);
	if (runningbufreq) {
		runningbufreq = 0;
		wakeup(&runningbufreq);
	}
	mtx_unlock(&rbreqlock);
}

/*
 * runningbufwakeup:
 *
 *	Decrement the outstanding write count accordingly.
 */
void
runningbufwakeup(struct buf *bp)
{
	long space, bspace;

	bspace = bp->b_runningbufspace;
	if (bspace == 0)
		return;
	space = atomic_fetchadd_long(&runningbufspace, -bspace);
	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
	    space, bspace));
	bp->b_runningbufspace = 0;
	/*
	 * Only acquire the lock and wakeup on the transition from exceeding
	 * the threshold to falling below it.
	 */
	if (space < lorunningspace)
		return;
	if (space - bspace > lorunningspace)
		return;
	runningwakeup();
}
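/*
 * Accounting sketch (derived from bufwrite() later in this file): an
 * async writer charges a buffer with
 *
 *	bp->b_runningbufspace = bp->b_bufsize;
 *	space = atomic_fetchadd_long(&runningbufspace,
 *	    bp->b_runningbufspace);
 *
 * before bstrategy(), and the completion path undoes the charge via
 * runningbufwakeup() above, waking threads parked in
 * waitrunningbufspace() once the total drops back to lorunningspace.
 */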
/*
 * bufcountadd:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */
static __inline void
bufcountadd(struct buf *bp)
{
	int mask, need_wakeup, old, on;

	KASSERT((bp->b_flags & B_INFREECNT) == 0,
	    ("buf %p already counted as free", bp));
	bp->b_flags |= B_INFREECNT;
	old = atomic_fetchadd_int(&numfreebuffers, 1);
	KASSERT(old >= 0 && old < nbuf,
	    ("numfreebuffers climbed to %d", old + 1));
	mask = VFS_BIO_NEED_ANY;
	if (numfreebuffers >= hifreebuffers)
		mask |= VFS_BIO_NEED_FREE;
	rw_rlock(&nblock);
	for (;;) {
		need_wakeup = 0;
		on = needsbuffer;
		if (on == 0)
			break;
		need_wakeup = 1;
		if (atomic_cmpset_rel_int(&needsbuffer, on, on & ~mask))
			break;
	}
	if (need_wakeup)
		wakeup(__DEVOLATILE(void *, &needsbuffer));
	rw_runlock(&nblock);
}

/*
 * bufcountsub:
 *
 *	Decrement the numfreebuffers count as needed.
 */
static void
bufcountsub(struct buf *bp)
{
	int old;

	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, the buffer was free and we must decrement
	 * numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		KASSERT((bp->b_flags & B_INFREECNT) != 0,
		    ("buf %p not counted in numfreebuffers", bp));
		bp->b_flags &= ~B_INFREECNT;
		old = atomic_fetchadd_int(&numfreebuffers, -1);
		KASSERT(old > 0, ("numfreebuffers dropped to %d", old - 1));
	}
}

/*
 * waitrunningbufspace()
 *
 *	runningbufspace is a measure of the amount of I/O currently
 *	running.  This routine is used in async-write situations to
 *	prevent creating huge backups of pending writes to a device.
 *	Only asynchronous writes are governed by this function.
 *
 *	This does NOT turn an async write into a sync write.  It waits
 *	for earlier writes to complete and generally returns before the
 *	caller's write has reached the device.
 */
void
waitrunningbufspace(void)
{

	mtx_lock(&rbreqlock);
	while (runningbufspace > hirunningspace) {
		runningbufreq = 1;
		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
	}
	mtx_unlock(&rbreqlock);
}
/*
 * vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline
void
vfs_buf_test_cache(struct buf *bp,
		vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		vm_page_t m)
{

	VM_OBJECT_ASSERT_LOCKED(m->object);
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/* Wake up the buffer daemon if necessary */
static __inline void
bd_wakeup(void)
{

	mtx_lock(&bdlock);
	if (bd_request == 0) {
		bd_request = 1;
		wakeup(&bd_request);
	}
	mtx_unlock(&bdlock);
}

/*
 * bd_speedup - speedup the buffer cache flushing code
 */
void
bd_speedup(void)
{
	int needwake;

	mtx_lock(&bdlock);
	needwake = 0;
	if (bd_speedupreq == 0 || bd_request == 0)
		needwake = 1;
	bd_speedupreq = 1;
	bd_request = 1;
	if (needwake)
		wakeup(&bd_request);
	mtx_unlock(&bdlock);
}

#ifndef NSWBUF_MIN
#define	NSWBUF_MIN	16
#endif

#ifdef __i386__
#define	TRANSIENT_DENOM	5
#else
#define	TRANSIENT_DENOM	10
#endif

/*
 * Calculating buffer cache scaling values and reserve space for buffer
 * headers.  This is called during low level kernel initialization and
 * may be called more than once.  We CANNOT write to the memory area
 * being reserved at this time.
 */
caddr_t
kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
{
	int tuned_nbuf;
	long maxbuf, maxbuf_sz, buf_sz, biotmap_sz;

	/*
	 * physmem_est is in pages.  Convert it to kilobytes (assumes
	 * PAGE_SIZE is >= 1K)
	 */
	physmem_est = physmem_est * (PAGE_SIZE / 1024);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / 1024;

		nbuf = 50;
		if (physmem_est > 4096)
			nbuf += min((physmem_est - 4096) / factor,
			    65536 / factor);
		if (physmem_est > 65536)
			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
			    32 * 1024 * 1024 / (factor * 5));

		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;
		tuned_nbuf = 1;
	} else
		tuned_nbuf = 0;

	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
	maxbuf = (LONG_MAX / 3) / BKVASIZE;
	if (nbuf > maxbuf) {
		if (!tuned_nbuf)
			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
			    maxbuf);
		nbuf = maxbuf;
	}
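	/*
	 * Worked example of the auto-sizing above (illustrative only;
	 * assumes BKVASIZE == 16 KiB, so factor == 64): for physmem_est
	 * of 131072 KiB (128 MiB), nbuf = 50 + 1024 buffers for the
	 * first 64 MiB, plus min((131072 - 65536) * 2 / 320, 104857)
	 * = 409 for the remainder, i.e. roughly 1483 buffers before
	 * any maxbcache clamp is applied.
	 */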
	/*
	 * Ideal allocation size for the transient bio submap is 10%
	 * of the maximal space buffer map.  This roughly corresponds
	 * to the amount of the buffer mapped for typical UFS load.
	 *
	 * Clip the buffer map to reserve space for the transient
	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
	 * maximum buffer map extent on the platform.
	 *
	 * Falling back to maxbuf when maxbcache is unset allows us to
	 * avoid trimming the buffer KVA on architectures with ample
	 * KVA space.
	 */
	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
		buf_sz = (long)nbuf * BKVASIZE;
		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
		    (TRANSIENT_DENOM - 1)) {
			/*
			 * There is more KVA than memory.  Do not
			 * adjust buffer map size, and assign the rest
			 * of maxbuf to transient map.
			 */
			biotmap_sz = maxbuf_sz - buf_sz;
		} else {
			/*
			 * Buffer map spans all KVA we could afford on
			 * this platform.  Give 10% (20% on i386) of
			 * the buffer map to the transient bio map.
			 */
			biotmap_sz = buf_sz / TRANSIENT_DENOM;
			buf_sz -= biotmap_sz;
		}
		if (biotmap_sz / INT_MAX > MAXPHYS)
			bio_transient_maxcnt = INT_MAX;
		else
			bio_transient_maxcnt = biotmap_sz / MAXPHYS;
		/*
		 * Artificially limit to 1024 simultaneous in-flight I/Os
		 * using the transient mapping.
		 */
		if (bio_transient_maxcnt > 1024)
			bio_transient_maxcnt = 1024;
		if (tuned_nbuf)
			nbuf = buf_sz / BKVASIZE;
	}

	/*
	 * swbufs are used as temporary holders for I/O, such as paging I/O.
	 * We have no fewer than 16 and no more than 256.
	 */
	nswbuf = min(nbuf / 4, 256);
	TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
	if (nswbuf < NSWBUF_MIN)
		nswbuf = NSWBUF_MIN;

	/*
	 * Reserve space for the buffer cache buffers
	 */
	swbuf = (void *)v;
	v = (caddr_t)(swbuf + nswbuf);
	buf = (void *)v;
	v = (caddr_t)(buf + nbuf);

	return (v);
}

/* Initialize the buffer subsystem.  Called before use of any buffers. */
void
bufinit(void)
{
	struct buf *bp;
	int i;

	CTASSERT(MAXBCACHEBUF >= MAXBSIZE);
	mtx_init(&bqclean, "bufq clean lock", NULL, MTX_DEF);
	mtx_init(&bqdirty, "bufq dirty lock", NULL, MTX_DEF);
	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
	rw_init(&nblock, "needsbuffer lock");
	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL | B_INFREECNT;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_xflags = 0;
		bp->b_data = bp->b_kvabase = unmapped_buf;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
#ifdef INVARIANTS
		bq_len[QUEUE_EMPTY]++;
#endif
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
	 * this may result in KVM fragmentation which is not handled optimally
	 * by the system.
	 */
	maxbufspace = (long)nbuf * BKVASIZE;
	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBCACHEBUF * 10);
	lobufspace = hibufspace - MAXBCACHEBUF;
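	/*
	 * Illustrative numbers (assuming nbuf == 1483 as in the example
	 * in kern_vfs_bio_buffer_alloc() and MAXBCACHEBUF == 64 KiB):
	 * maxbufspace is about 23 MiB, hibufspace ends up as
	 * maxbufspace - 640 KiB (the lmax() picks the subtraction over
	 * 3/4 of maxbufspace), and lobufspace sits one MAXBCACHEBUF
	 * below hibufspace.
	 */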
	/*
	 * Note: The 16 MiB upper limit for hirunningspace was chosen
	 * arbitrarily and may need further tuning.  It corresponds to
	 * 128 outstanding write IO requests (if IO size is 128 KiB),
	 * which fits with many RAID controllers' tagged queuing limits.
	 * The lower 1 MiB limit is the historical upper limit for
	 * hirunningspace.
	 */
	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBCACHEBUF),
	    16 * 1024 * 1024), 1024 * 1024);
	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBCACHEBUF);

	/*
	 * Limit the amount of malloc memory since it is wired permanently
	 * into the kernel space.  Even though this is accounted for in the
	 * buffer allocation, we don't want the malloced region to grow
	 * uncontrolled.  The malloc scheme improves memory utilization
	 * significantly for average (small) directories.
	 */
	maxbufmallocspace = hibufspace / 20;

	/*
	 * Reduce the chance of a deadlock occurring by limiting the number
	 * of delayed-write dirty buffers we allow to stack up.
	 */
	hidirtybuffers = nbuf / 4 + 20;
	dirtybufthresh = hidirtybuffers * 9 / 10;
	numdirtybuffers = 0;
	/*
	 * To support extreme low-memory systems, make sure hidirtybuffers
	 * cannot eat up all available buffer space.  This occurs when our
	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
	 * buffer space assuming BKVASIZE'd buffers.
	 */
	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
		hidirtybuffers >>= 1;
	}
	lodirtybuffers = hidirtybuffers / 2;

	/*
	 * Try to keep the number of free buffers in the specified range,
	 * and give special processes (e.g. like buf_daemon) access to an
	 * emergency reserve.
	 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
}

#ifdef INVARIANTS
static inline void
vfs_buf_check_mapped(struct buf *bp)
{

	KASSERT(bp->b_kvabase != unmapped_buf,
	    ("mapped buf: b_kvabase was not updated %p", bp));
	KASSERT(bp->b_data != unmapped_buf,
	    ("mapped buf: b_data was not updated %p", bp));
	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
	    MAXPHYS, ("b_data + b_offset unmapped %p", bp));
}

static inline void
vfs_buf_check_unmapped(struct buf *bp)
{

	KASSERT(bp->b_data == unmapped_buf,
	    ("unmapped buf: corrupted b_data %p", bp));
}

#define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
#define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
#else
#define	BUF_CHECK_MAPPED(bp) do {} while (0)
#define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
#endif

static int
isbufbusy(struct buf *bp)
{
	if (((bp->b_flags & (B_INVAL | B_PERSISTENT)) == 0 &&
	    BUF_ISLOCKED(bp)) ||
	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
		return (1);
	return (0);
}

/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
bufshutdown(int show_busybufs)
{
	static int first_buf_printf = 1;
	struct buf *bp;
	int iter, nbusy, pbusy;
#ifndef PREEMPTION
	int subiter;
#endif

	/*
	 * Sync filesystems for shutdown
	 */
	wdog_kern_pat(WD_LASTVAL);
	sys_sync(curthread, NULL);

	/*
	 * With soft updates, some buffers that are
	 * written will be remarked as dirty until other
	 * buffers are written.
	 */
	for (iter = pbusy = 0; iter < 20; iter++) {
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; )
			if (isbufbusy(bp))
				nbusy++;
		if (nbusy == 0) {
			if (first_buf_printf)
				printf("All buffers synced.");
			break;
		}
		if (first_buf_printf) {
			printf("Syncing disks, buffers remaining... ");
			first_buf_printf = 0;
		}
		printf("%d ", nbusy);
		if (nbusy < pbusy)
			iter = 0;
		pbusy = nbusy;

		wdog_kern_pat(WD_LASTVAL);
		sys_sync(curthread, NULL);

#ifdef PREEMPTION
		/*
		 * Drop Giant and spin for a while to allow
		 * interrupt threads to run.
		 */
		DROP_GIANT();
		DELAY(50000 * iter);
		PICKUP_GIANT();
#else
		/*
		 * Drop Giant and context switch several times to
		 * allow interrupt threads to run.
		 */
		DROP_GIANT();
		for (subiter = 0; subiter < 50 * iter; subiter++) {
			thread_lock(curthread);
			mi_switch(SW_VOL, NULL);
			thread_unlock(curthread);
			DELAY(1000);
		}
		PICKUP_GIANT();
#endif
	}
	printf("\n");
	/*
	 * Count only busy local buffers to prevent forcing
	 * a fsck if we're just a client of a wedged NFS server
	 */
	nbusy = 0;
	for (bp = &buf[nbuf]; --bp >= buf; ) {
		if (isbufbusy(bp)) {
#if 0
/* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
			if (bp->b_dev == NULL) {
				TAILQ_REMOVE(&mountlist,
				    bp->b_vp->v_mount, mnt_list);
				continue;
			}
#endif
			nbusy++;
			if (show_busybufs > 0) {
				printf(
	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
				    nbusy, bp, bp->b_vp, bp->b_flags,
				    (intmax_t)bp->b_blkno,
				    (intmax_t)bp->b_lblkno);
				BUF_LOCKPRINTINFO(bp);
				if (show_busybufs > 1)
					vn_printf(bp->b_vp,
					    "vnode content: ");
			}
		}
	}
	if (nbusy) {
		/*
		 * Failed to sync all blocks.  Indicate this and don't
		 * unmount filesystems (thus forcing an fsck on reboot).
		 */
		printf("Giving up on %d buffers\n", nbusy);
		DELAY(5000000);	/* 5 seconds */
	} else {
		if (!first_buf_printf)
			printf("Final sync complete\n");
		/*
		 * Unmount filesystems
		 */
		if (panicstr == 0)
			vfs_unmountall();
	}
	swapoff_all();
	DELAY(100000);		/* wait for console output to finish */
}

static void
bpmap_qenter(struct buf *bp)
{

	BUF_CHECK_MAPPED(bp);

	/*
	 * bp->b_data is relative to bp->b_offset, but
	 * bp->b_offset may be offset into the first page.
	 */
	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
}
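/*
 * Example of the offset handling in bpmap_qenter() above (illustrative;
 * assumes PAGE_SIZE == 4096): with b_offset == 0x1200, the pages are
 * entered at trunc_page(b_data) and the low bits are OR'ed back, so
 * b_data ends up pointing 0x200 bytes into the first mapped page.
 */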
/*
 * binsfree:
 *
 *	Insert the buffer into the appropriate free list.
 */
static void
binsfree(struct buf *bp, int qindex)
{
	struct mtx *olock, *nlock;

	BUF_ASSERT_XLOCKED(bp);

	nlock = bqlock(qindex);
	/* Handle delayed bremfree() processing. */
	if (bp->b_flags & B_REMFREE) {
		olock = bqlock(bp->b_qindex);
		mtx_lock(olock);
		bremfreel(bp);
		if (olock != nlock) {
			mtx_unlock(olock);
			mtx_lock(nlock);
		}
	} else
		mtx_lock(nlock);

	if (bp->b_qindex != QUEUE_NONE)
		panic("binsfree: free buffer onto another queue???");

	bp->b_qindex = qindex;
	if (bp->b_flags & B_AGE)
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
	else
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
#ifdef INVARIANTS
	bq_len[bp->b_qindex]++;
#endif
	mtx_unlock(nlock);

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
		bufcountadd(bp);
}

/*
 * bremfree:
 *
 *	Mark the buffer for removal from the appropriate free list.
 */
void
bremfree(struct buf *bp)
{

	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT((bp->b_flags & B_REMFREE) == 0,
	    ("bremfree: buffer %p already marked for delayed removal.", bp));
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfree: buffer %p not on a queue.", bp));
	BUF_ASSERT_XLOCKED(bp);

	bp->b_flags |= B_REMFREE;
	bufcountsub(bp);
}

/*
 * bremfreef:
 *
 *	Force an immediate removal from a free list.  Used only in nfs when
 *	it abuses the b_freelist pointer.
 */
void
bremfreef(struct buf *bp)
{
	struct mtx *qlock;

	qlock = bqlock(bp->b_qindex);
	mtx_lock(qlock);
	bremfreel(bp);
	mtx_unlock(qlock);
}

/*
 * bremfreel:
 *
 *	Removes a buffer from the free list, must be called with the
 *	correct qlock held.
 */
static void
bremfreel(struct buf *bp)
{

	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfreel: buffer %p not on a queue.", bp));
	BUF_ASSERT_XLOCKED(bp);
	mtx_assert(bqlock(bp->b_qindex), MA_OWNED);

	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
#ifdef INVARIANTS
	KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
	    bp->b_qindex));
	bq_len[bp->b_qindex]--;
#endif
	bp->b_qindex = QUEUE_NONE;
	/*
	 * If this was a delayed bremfree() we only need to remove the buffer
	 * from the queue and return; the stats are already done.
	 */
	if (bp->b_flags & B_REMFREE) {
		bp->b_flags &= ~B_REMFREE;
		return;
	}
	bufcountsub(bp);
}
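/*
 * Summary sketch of the delayed-removal protocol above: bremfree() only
 * sets B_REMFREE and adjusts the free-buffer count; the buffer physically
 * leaves its queue later, under the correct queue lock, when binsfree()
 * or bremfreef() calls bremfreel(), which then skips the second
 * bufcountsub() for B_REMFREE buffers.
 */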
/*
 * bufkvafree:
 *
 *	Free the kva allocation for a buffer.
 */
static void
bufkvafree(struct buf *bp)
{

#ifdef INVARIANTS
	if (bp->b_kvasize == 0) {
		KASSERT(bp->b_kvabase == unmapped_buf &&
		    bp->b_data == unmapped_buf,
		    ("Leaked KVA space on %p", bp));
	} else if (buf_mapped(bp))
		BUF_CHECK_MAPPED(bp);
	else
		BUF_CHECK_UNMAPPED(bp);
#endif
	if (bp->b_kvasize == 0)
		return;

	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
	atomic_subtract_long(&bufkvaspace, bp->b_kvasize);
	atomic_add_int(&buffreekvacnt, 1);
	bp->b_data = bp->b_kvabase = unmapped_buf;
	bp->b_kvasize = 0;
}

/*
 * bufkvaalloc:
 *
 *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
 */
static int
bufkvaalloc(struct buf *bp, int maxsize, int gbflags)
{
	vm_offset_t addr;
	int error;

	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));

	bufkvafree(bp);

	addr = 0;
	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
	if (error != 0) {
		/*
		 * Buffer map is too fragmented.  Request the caller
		 * to defragment the map.
		 */
		atomic_add_int(&bufdefragcnt, 1);
		return (error);
	}
	bp->b_kvabase = (caddr_t)addr;
	bp->b_kvasize = maxsize;
	atomic_add_long(&bufkvaspace, bp->b_kvasize);
	if ((gbflags & GB_UNMAPPED) != 0) {
		bp->b_data = unmapped_buf;
		BUF_CHECK_UNMAPPED(bp);
	} else {
		bp->b_data = bp->b_kvabase;
		BUF_CHECK_MAPPED(bp);
	}
	return (0);
}

/*
 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
 * the buffer is valid and we do not have to do anything.
 */
void
breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
    int cnt, struct ucred * cred)
{
	struct buf *rabp;
	int i;

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (!TD_IS_IDLETHREAD(curthread))
				curthread->td_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED && cred != NOCRED)
				rabp->b_rcred = crhold(cred);
			vfs_busy_pages(rabp, 0);
			BUF_KERNPROC(rabp);
			rabp->b_iooffset = dbtob(rabp->b_blkno);
			bstrategy(rabp);
		} else {
			brelse(rabp);
		}
	}
}
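/*
 * Typical caller sketch (hypothetical code; bread() is the sys/buf.h
 * wrapper around breadn_flags() below):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, size, NOCRED, &bp);
 *	if (error == 0) {
 *		... consume bp->b_data ...
 *		brelse(bp);
 *	}
 */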
1366 */ 1367 *bpp = bp = getblk(vp, blkno, size, 0, 0, flags); 1368 if (bp == NULL) 1369 return (EBUSY); 1370 1371 /* if not found in cache, do some I/O */ 1372 if ((bp->b_flags & B_CACHE) == 0) { 1373 if (!TD_IS_IDLETHREAD(curthread)) 1374 curthread->td_ru.ru_inblock++; 1375 bp->b_iocmd = BIO_READ; 1376 bp->b_flags &= ~B_INVAL; 1377 bp->b_ioflags &= ~BIO_ERROR; 1378 if (bp->b_rcred == NOCRED && cred != NOCRED) 1379 bp->b_rcred = crhold(cred); 1380 vfs_busy_pages(bp, 0); 1381 bp->b_iooffset = dbtob(bp->b_blkno); 1382 bstrategy(bp); 1383 ++readwait; 1384 } 1385 1386 breada(vp, rablkno, rabsize, cnt, cred); 1387 1388 if (readwait) { 1389 rv = bufwait(bp); 1390 } 1391 return (rv); 1392 } 1393 1394 /* 1395 * Write, release buffer on completion. (Done by iodone 1396 * if async). Do not bother writing anything if the buffer 1397 * is invalid. 1398 * 1399 * Note that we set B_CACHE here, indicating that buffer is 1400 * fully valid and thus cacheable. This is true even of NFS 1401 * now so we set it generally. This could be set either here 1402 * or in biodone() since the I/O is synchronous. We put it 1403 * here. 1404 */ 1405 int 1406 bufwrite(struct buf *bp) 1407 { 1408 int oldflags; 1409 struct vnode *vp; 1410 long space; 1411 int vp_md; 1412 1413 CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1414 if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) { 1415 bp->b_flags |= B_INVAL | B_RELBUF; 1416 bp->b_flags &= ~B_CACHE; 1417 brelse(bp); 1418 return (ENXIO); 1419 } 1420 if (bp->b_flags & B_INVAL) { 1421 brelse(bp); 1422 return (0); 1423 } 1424 1425 if (bp->b_flags & B_BARRIER) 1426 barrierwrites++; 1427 1428 oldflags = bp->b_flags; 1429 1430 BUF_ASSERT_HELD(bp); 1431 1432 if (bp->b_pin_count > 0) 1433 bunpin_wait(bp); 1434 1435 KASSERT(!(bp->b_vflags & BV_BKGRDINPROG), 1436 ("FFS background buffer should not get here %p", bp)); 1437 1438 vp = bp->b_vp; 1439 if (vp) 1440 vp_md = vp->v_vflag & VV_MD; 1441 else 1442 vp_md = 0; 1443 1444 /* 1445 * Mark the buffer clean. Increment the bufobj write count 1446 * before bundirty() call, to prevent other thread from seeing 1447 * empty dirty list and zero counter for writes in progress, 1448 * falsely indicating that the bufobj is clean. 1449 */ 1450 bufobj_wref(bp->b_bufobj); 1451 bundirty(bp); 1452 1453 bp->b_flags &= ~B_DONE; 1454 bp->b_ioflags &= ~BIO_ERROR; 1455 bp->b_flags |= B_CACHE; 1456 bp->b_iocmd = BIO_WRITE; 1457 1458 vfs_busy_pages(bp, 1); 1459 1460 /* 1461 * Normal bwrites pipeline writes 1462 */ 1463 bp->b_runningbufspace = bp->b_bufsize; 1464 space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace); 1465 1466 if (!TD_IS_IDLETHREAD(curthread)) 1467 curthread->td_ru.ru_oublock++; 1468 if (oldflags & B_ASYNC) 1469 BUF_KERNPROC(bp); 1470 bp->b_iooffset = dbtob(bp->b_blkno); 1471 bstrategy(bp); 1472 1473 if ((oldflags & B_ASYNC) == 0) { 1474 int rtval = bufwait(bp); 1475 brelse(bp); 1476 return (rtval); 1477 } else if (space > hirunningspace) { 1478 /* 1479 * don't allow the async write to saturate the I/O 1480 * system. We will not deadlock here because 1481 * we are blocking waiting for I/O that is already in-progress 1482 * to complete. We do not block here if it is the update 1483 * or syncer daemon trying to clean up as that can lead 1484 * to deadlock. 
1485 */ 1486 if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md) 1487 waitrunningbufspace(); 1488 } 1489 1490 return (0); 1491 } 1492 1493 void 1494 bufbdflush(struct bufobj *bo, struct buf *bp) 1495 { 1496 struct buf *nbp; 1497 1498 if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) { 1499 (void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread); 1500 altbufferflushes++; 1501 } else if (bo->bo_dirty.bv_cnt > dirtybufthresh) { 1502 BO_LOCK(bo); 1503 /* 1504 * Try to find a buffer to flush. 1505 */ 1506 TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) { 1507 if ((nbp->b_vflags & BV_BKGRDINPROG) || 1508 BUF_LOCK(nbp, 1509 LK_EXCLUSIVE | LK_NOWAIT, NULL)) 1510 continue; 1511 if (bp == nbp) 1512 panic("bdwrite: found ourselves"); 1513 BO_UNLOCK(bo); 1514 /* Don't countdeps with the bo lock held. */ 1515 if (buf_countdeps(nbp, 0)) { 1516 BO_LOCK(bo); 1517 BUF_UNLOCK(nbp); 1518 continue; 1519 } 1520 if (nbp->b_flags & B_CLUSTEROK) { 1521 vfs_bio_awrite(nbp); 1522 } else { 1523 bremfree(nbp); 1524 bawrite(nbp); 1525 } 1526 dirtybufferflushes++; 1527 break; 1528 } 1529 if (nbp == NULL) 1530 BO_UNLOCK(bo); 1531 } 1532 } 1533 1534 /* 1535 * Delayed write. (Buffer is marked dirty). Do not bother writing 1536 * anything if the buffer is marked invalid. 1537 * 1538 * Note that since the buffer must be completely valid, we can safely 1539 * set B_CACHE. In fact, we have to set B_CACHE here rather then in 1540 * biodone() in order to prevent getblk from writing the buffer 1541 * out synchronously. 1542 */ 1543 void 1544 bdwrite(struct buf *bp) 1545 { 1546 struct thread *td = curthread; 1547 struct vnode *vp; 1548 struct bufobj *bo; 1549 1550 CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1551 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1552 KASSERT((bp->b_flags & B_BARRIER) == 0, 1553 ("Barrier request in delayed write %p", bp)); 1554 BUF_ASSERT_HELD(bp); 1555 1556 if (bp->b_flags & B_INVAL) { 1557 brelse(bp); 1558 return; 1559 } 1560 1561 /* 1562 * If we have too many dirty buffers, don't create any more. 1563 * If we are wildly over our limit, then force a complete 1564 * cleanup. Otherwise, just keep the situation from getting 1565 * out of control. Note that we have to avoid a recursive 1566 * disaster and not try to clean up after our own cleanup! 1567 */ 1568 vp = bp->b_vp; 1569 bo = bp->b_bufobj; 1570 if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) { 1571 td->td_pflags |= TDP_INBDFLUSH; 1572 BO_BDFLUSH(bo, bp); 1573 td->td_pflags &= ~TDP_INBDFLUSH; 1574 } else 1575 recursiveflushes++; 1576 1577 bdirty(bp); 1578 /* 1579 * Set B_CACHE, indicating that the buffer is fully valid. This is 1580 * true even of NFS now. 1581 */ 1582 bp->b_flags |= B_CACHE; 1583 1584 /* 1585 * This bmap keeps the system from needing to do the bmap later, 1586 * perhaps when the system is attempting to do a sync. Since it 1587 * is likely that the indirect block -- or whatever other datastructure 1588 * that the filesystem needs is still in memory now, it is a good 1589 * thing to do this. Note also, that if the pageout daemon is 1590 * requesting a sync -- there might not be enough memory to do 1591 * the bmap then... So, this is important to do. 1592 */ 1593 if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) { 1594 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 1595 } 1596 1597 /* 1598 * Set the *dirty* buffer range based upon the VM system dirty 1599 * pages. 1600 * 1601 * Mark the buffer pages as clean. 
/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf *bp)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct bufobj *bo;

	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT((bp->b_flags & B_BARRIER) == 0,
	    ("Barrier request in delayed write %p", bp));
	BUF_ASSERT_HELD(bp);

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}

	/*
	 * If we have too many dirty buffers, don't create any more.
	 * If we are wildly over our limit, then force a complete
	 * cleanup.  Otherwise, just keep the situation from getting
	 * out of control.  Note that we have to avoid a recursive
	 * disaster and not try to clean up after our own cleanup!
	 */
	vp = bp->b_vp;
	bo = bp->b_bufobj;
	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
		td->td_pflags |= TDP_INBDFLUSH;
		BO_BDFLUSH(bo, bp);
		td->td_pflags &= ~TDP_INBDFLUSH;
	} else
		recursiveflushes++;

	bdirty(bp);
	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty
	 * pages.
	 *
	 * Mark the buffer pages as clean.  We need to do this here to
	 * satisfy the vnode_pager and the pageout daemon, so that it
	 * thinks that the pages have been "cleaned".  Note that since
	 * the pages are in a delayed write buffer -- the VFS layer
	 * "will" see that the pages get written out on the next sync,
	 * or perhaps the cluster will be completed.
	 */
	vfs_clean_pages_dirty_buf(bp);
	bqrelse(bp);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}

/*
 * bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear BIO_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(struct buf *bp)
{

	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
		reassignbuf(bp);
		bdirtyadd();
	}
}

/*
 * bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	The buffer must be on QUEUE_NONE.
 */
void
bundirty(struct buf *bp)
{

	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp);
		bdirtysub();
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 * bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf *bp)
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 * babarrierwrite:
 *
 *	Asynchronous barrier write.  Start output on a buffer, but do not
 *	wait for it to complete.  Place a write barrier after this write so
 *	that this buffer and all buffers written before it are committed to
 *	the disk before any buffers written after this write are committed
 *	to the disk.  The buffer is released when the output completes.
 */
void
babarrierwrite(struct buf *bp)
{

	bp->b_flags |= B_ASYNC | B_BARRIER;
	(void) bwrite(bp);
}

/*
 * bbarrierwrite:
 *
 *	Synchronous barrier write.  Start output on a buffer and wait for
 *	it to complete.  Place a write barrier after this write so that
 *	this buffer and all buffers written before it are committed to
 *	the disk before any buffers written after this write are committed
 *	to the disk.  The buffer is released when the output completes.
 */
int
bbarrierwrite(struct buf *bp)
{

	bp->b_flags |= B_BARRIER;
	return (bwrite(bp));
}

/*
 * bwillwrite:
 *
 *	Called prior to the locking of any vnodes when we are expecting to
 *	write.  We do not want to starve the buffer cache with too many
 *	dirty buffers so we block here.  By blocking prior to the locking
 *	of any vnodes we attempt to avoid the situation where a locked vnode
 *	prevents the various system daemons from flushing related buffers.
 */
void
bwillwrite(void)
{

	if (numdirtybuffers >= hidirtybuffers) {
		mtx_lock(&bdirtylock);
		while (numdirtybuffers >= hidirtybuffers) {
			bdirtywait = 1;
			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
			    "flswai", 0);
		}
		mtx_unlock(&bdirtylock);
	}
}

/*
 * Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{

	return (numdirtybuffers >= hidirtybuffers);
}
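/*
 * Illustrative call order for bwillwrite() (hypothetical caller; the
 * vn_lock() usage is an assumed typical write path, not code from this
 * file):
 *
 *	bwillwrite();
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... dirty some buffers ...
 *	VOP_UNLOCK(vp, 0);
 */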
/*
 * brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf *bp)
{
	int qindex;

	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (BUF_LOCKRECURSED(bp)) {
		/*
		 * Do not process, in particular, do not handle the
		 * B_INVAL/B_RELBUF and do not release to free list.
		 */
		BUF_UNLOCK(bp);
		return;
	}

	if (bp->b_flags & B_MANAGED) {
		bqrelse(bp);
		return;
	}

	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags &= ~BV_BKGRDERR;
		BO_UNLOCK(bp->b_bufobj);
		bdirty(bp);
	}
	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
		/*
		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
		 * pages from being scrapped.  If the error is anything
		 * other than an I/O error (EIO), assume that retrying
		 * is futile.
		 */
		bp->b_ioflags &= ~BIO_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (!LIST_EMPTY(&bp->b_dep))
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI)
			bdirtysub();
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 */
	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer.  If the
	 * buffer has a background write in progress, we need to keep it
	 * around to prevent it from being reconstituted and starting a second
	 * background write.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_mount != NULL &&
	    (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
	    !vn_isdisk(bp->b_vp, NULL) &&
	    (bp->b_flags & B_DELWRI))
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;

		obj = bp->b_bufobj->bo_object;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then b_data pointer may not be page-aligned.
		 * But our b_pages[] array *IS* page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_offset;
		for (i = 0; i < bp->b_npages; i++) {
			int had_bogus = 0;

			m = bp->b_pages[i];

			/*
			 * If we hit a bogus page, fixup *all* the bogus pages
			 * now.
			 */
1905 			 */
1906 			if (m == bogus_page) {
1907 				poff = OFF_TO_IDX(bp->b_offset);
1908 				had_bogus = 1;
1909
1910 				VM_OBJECT_RLOCK(obj);
1911 				for (j = i; j < bp->b_npages; j++) {
1912 					vm_page_t mtmp;
1913 					mtmp = bp->b_pages[j];
1914 					if (mtmp == bogus_page) {
1915 						mtmp = vm_page_lookup(obj, poff + j);
1916 						if (!mtmp) {
1917 							panic("brelse: page missing\n");
1918 						}
1919 						bp->b_pages[j] = mtmp;
1920 					}
1921 				}
1922 				VM_OBJECT_RUNLOCK(obj);
1923
1924 				if ((bp->b_flags & B_INVAL) == 0 &&
1925 				    buf_mapped(bp)) {
1926 					BUF_CHECK_MAPPED(bp);
1927 					pmap_qenter(
1928 					    trunc_page((vm_offset_t)bp->b_data),
1929 					    bp->b_pages, bp->b_npages);
1930 				}
1931 				m = bp->b_pages[i];
1932 			}
1933 			if ((bp->b_flags & B_NOCACHE) ||
1934 			    (bp->b_ioflags & BIO_ERROR &&
1935 			    bp->b_iocmd == BIO_READ)) {
1936 				int poffset = foff & PAGE_MASK;
1937 				int presid = resid > (PAGE_SIZE - poffset) ?
1938 				    (PAGE_SIZE - poffset) : resid;
1939
1940 				KASSERT(presid >= 0, ("brelse: extra page"));
1941 				VM_OBJECT_WLOCK(obj);
1942 				while (vm_page_xbusied(m)) {
1943 					vm_page_lock(m);
1944 					VM_OBJECT_WUNLOCK(obj);
1945 					vm_page_busy_sleep(m, "mbncsh");
1946 					VM_OBJECT_WLOCK(obj);
1947 				}
1948 				if (pmap_page_wired_mappings(m) == 0)
1949 					vm_page_set_invalid(m, poffset, presid);
1950 				VM_OBJECT_WUNLOCK(obj);
1951 				if (had_bogus)
1952 					printf("avoided corruption bug in bogus_page/brelse code\n");
1953 			}
1954 			resid -= PAGE_SIZE - (foff & PAGE_MASK);
1955 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
1956 		}
1957 		if (bp->b_flags & (B_INVAL | B_RELBUF))
1958 			vfs_vmio_release(bp);
1959
1960 	} else if (bp->b_flags & B_VMIO) {
1961
1962 		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
1963 			vfs_vmio_release(bp);
1964 		}
1965
1966 	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
1967 		if (bp->b_bufsize != 0)
1968 			allocbuf(bp, 0);
1969 		if (bp->b_vp != NULL)
1970 			brelvp(bp);
1971 	}
1972
1973 	/*
1974 	 * If the buffer has junk contents signal it and eventually
1975 	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
1976 	 * doesn't find it.
1977 	 */
1978 	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
1979 	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
1980 		bp->b_flags |= B_INVAL;
1981 	if (bp->b_flags & B_INVAL) {
1982 		if (bp->b_flags & B_DELWRI)
1983 			bundirty(bp);
1984 		if (bp->b_vp)
1985 			brelvp(bp);
1986 	}
1987
1988 	/* buffers with no memory */
1989 	if (bp->b_bufsize == 0) {
1990 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
1991 		if (bp->b_vflags & BV_BKGRDINPROG)
1992 			panic("losing buffer 1");
1993 		bufkvafree(bp);
1994 		qindex = QUEUE_EMPTY;
1995 		bp->b_flags |= B_AGE;
1996 	/* buffers with junk contents */
1997 	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
1998 	    (bp->b_ioflags & BIO_ERROR)) {
1999 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2000 		if (bp->b_vflags & BV_BKGRDINPROG)
2001 			panic("losing buffer 2");
2002 		qindex = QUEUE_CLEAN;
2003 		bp->b_flags |= B_AGE;
2004 	/* remaining buffers */
2005 	} else if (bp->b_flags & B_DELWRI)
2006 		qindex = QUEUE_DIRTY;
2007 	else
2008 		qindex = QUEUE_CLEAN;
2009
2010 	binsfree(bp, qindex);
2011
2012 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
2013 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2014 		panic("brelse: not dirty");
2015 	/* unlock */
2016 	BUF_UNLOCK(bp);
2017 }
2018
2019 /*
2020  * Release a buffer back to the appropriate queue but do not try to free
2021  * it.  The buffer is expected to be used again soon.
2022  *
2023  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2024  * biodone() to requeue an async I/O on completion.  It is also used when
2025  * known good buffers need to be requeued but we think we may need the data
2026  * again soon.
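 *
 * An illustrative sketch of the intended split between the two release
 * paths (not a fixed rule; see brelse() above):
 *
 *	if (error)
 *		brelse(bp);	(invalidate or recycle the buffer)
 *	else
 *		bqrelse(bp);	(keep it; we expect to need it again)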
2027  *
2028  * XXX we should be able to leave the B_RELBUF hint set on completion.
2029  */
2030 void
2031 bqrelse(struct buf *bp)
2032 {
2033 	int qindex;
2034
2035 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2036 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2037 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2038
2039 	if (BUF_LOCKRECURSED(bp)) {
2040 		/* do not release to free list */
2041 		BUF_UNLOCK(bp);
2042 		return;
2043 	}
2044 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2045
2046 	if (bp->b_flags & B_MANAGED) {
2047 		if (bp->b_flags & B_REMFREE)
2048 			bremfreef(bp);
2049 		goto out;
2050 	}
2051
2052 	/* buffers with stale but valid contents */
2053 	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2054 	    BV_BKGRDERR)) == BV_BKGRDERR) {
2055 		BO_LOCK(bp->b_bufobj);
2056 		bp->b_vflags &= ~BV_BKGRDERR;
2057 		BO_UNLOCK(bp->b_bufobj);
2058 		qindex = QUEUE_DIRTY;
2059 	} else {
2060 		if ((bp->b_flags & B_DELWRI) == 0 &&
2061 		    (bp->b_xflags & BX_VNDIRTY))
2062 			panic("bqrelse: not dirty");
2063 		qindex = QUEUE_CLEAN;
2064 	}
2065 	binsfree(bp, qindex);
2066
2067 out:
2068 	/* unlock */
2069 	BUF_UNLOCK(bp);
2070 }
2071
2072 /* Give pages used by the bp back to the VM system (where possible) */
2073 static void
2074 vfs_vmio_release(struct buf *bp)
2075 {
2076 	vm_object_t obj;
2077 	vm_page_t m;
2078 	int i;
2079
2080 	if (buf_mapped(bp)) {
2081 		BUF_CHECK_MAPPED(bp);
2082 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
2083 	} else
2084 		BUF_CHECK_UNMAPPED(bp);
2085 	obj = bp->b_bufobj->bo_object;
2086 	if (obj != NULL)
2087 		VM_OBJECT_WLOCK(obj);
2088 	for (i = 0; i < bp->b_npages; i++) {
2089 		m = bp->b_pages[i];
2090 		bp->b_pages[i] = NULL;
2091 		/*
2092 		 * In order to keep page LRU ordering consistent, put
2093 		 * everything on the inactive queue.
2094 		 */
2095 		vm_page_lock(m);
2096 		vm_page_unwire(m, PQ_INACTIVE);
2097
2098 		/*
2099 		 * Might as well free the page if we can and it has
2100 		 * no valid data.  We also free the page if the
2101 		 * buffer was used for direct I/O.
2102 		 */
2103 		if ((bp->b_flags & B_ASYNC) == 0 && !m->valid) {
2104 			if (m->wire_count == 0 && !vm_page_busied(m))
2105 				vm_page_free(m);
2106 		} else if (bp->b_flags & B_DIRECT)
2107 			vm_page_try_to_free(m);
2108 		vm_page_unlock(m);
2109 	}
2110 	if (obj != NULL)
2111 		VM_OBJECT_WUNLOCK(obj);
2112
2113 	if (bp->b_bufsize)
2114 		bufspaceadjust(bp, 0);
2115 	bp->b_npages = 0;
2116 	bp->b_flags &= ~B_VMIO;
2117 	if (bp->b_vp)
2118 		brelvp(bp);
2119 }
2120
2121 /*
2122  * Check to see if a block at a particular lbn is available for a clustered
2123  * write.
2124  */
2125 static int
2126 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
2127 {
2128 	struct buf *bpa;
2129 	int match;
2130
2131 	match = 0;
2132
2133 	/* If the buf isn't in core skip it */
2134 	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
2135 		return (0);
2136
2137 	/* If the buf is busy we don't want to wait for it */
2138 	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2139 		return (0);
2140
2141 	/* Only cluster with valid clusterable delayed write buffers */
2142 	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
2143 	    (B_DELWRI | B_CLUSTEROK))
2144 		goto done;
2145
2146 	if (bpa->b_bufsize != size)
2147 		goto done;
2148
2149 	/*
2150 	 * Check to see if it is in the expected place on disk and that the
2151 	 * block has been mapped.
2152 	 */
2153 	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
2154 		match = 1;
2155 done:
2156 	BUF_UNLOCK(bpa);
2157 	return (match);
2158 }
2159
2160 /*
2161  * vfs_bio_awrite:
2162  *
2163  *	Implement clustered async writes for clearing out B_DELWRI buffers.
2164  *	This is much better than the old way of writing only one buffer at
2165  *	a time.  Note that we may not be presented with the buffers in the
2166  *	correct order, so we search for the cluster in both directions.
2167  */
2168 int
2169 vfs_bio_awrite(struct buf *bp)
2170 {
2171 	struct bufobj *bo;
2172 	int i;
2173 	int j;
2174 	daddr_t lblkno = bp->b_lblkno;
2175 	struct vnode *vp = bp->b_vp;
2176 	int ncl;
2177 	int nwritten;
2178 	int size;
2179 	int maxcl;
2180 	int gbflags;
2181
2182 	bo = &vp->v_bufobj;
2183 	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
2184 	/*
2185 	 * Right now we support clustered writing only to regular files.  If
2186 	 * we find a clusterable block we could be in the middle of a cluster
2187 	 * rather than at the beginning.
2188 	 */
2189 	if ((vp->v_type == VREG) &&
2190 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
2191 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
2192
2193 		size = vp->v_mount->mnt_stat.f_iosize;
2194 		maxcl = MAXPHYS / size;
2195
2196 		BO_RLOCK(bo);
2197 		for (i = 1; i < maxcl; i++)
2198 			if (vfs_bio_clcheck(vp, size, lblkno + i,
2199 			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
2200 				break;
2201
2202 		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
2203 			if (vfs_bio_clcheck(vp, size, lblkno - j,
2204 			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
2205 				break;
2206 		BO_RUNLOCK(bo);
2207 		--j;
2208 		ncl = i + j;
2209 		/*
2210 		 * this is a possible cluster write
2211 		 */
2212 		if (ncl != 1) {
2213 			BUF_UNLOCK(bp);
2214 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
2215 			    gbflags);
2216 			return (nwritten);
2217 		}
2218 	}
2219 	bremfree(bp);
2220 	bp->b_flags |= B_ASYNC;
2221 	/*
2222 	 * default (old) behavior, writing out only one block
2223 	 *
2224 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
2225 	 */
2226 	nwritten = bp->b_bufsize;
2227 	(void) bwrite(bp);
2228
2229 	return (nwritten);
2230 }
2231
2232 /*
2233  * Ask the bufdaemon for help, or act as bufdaemon itself, when a
2234  * locked vnode is supplied.
2235  */
2236 static void
2237 getnewbuf_bufd_help(struct vnode *vp, int gbflags, int slpflag, int slptimeo,
2238     int defrag)
2239 {
2240 	struct thread *td;
2241 	char *waitmsg;
2242 	int error, fl, flags, norunbuf;
2243
2244 	mtx_assert(&bqclean, MA_OWNED);
2245
2246 	if (defrag) {
2247 		flags = VFS_BIO_NEED_BUFSPACE;
2248 		waitmsg = "nbufkv";
2249 	} else if (bufspace >= hibufspace) {
2250 		waitmsg = "nbufbs";
2251 		flags = VFS_BIO_NEED_BUFSPACE;
2252 	} else {
2253 		waitmsg = "newbuf";
2254 		flags = VFS_BIO_NEED_ANY;
2255 	}
2256 	atomic_set_int(&needsbuffer, flags);
2257 	mtx_unlock(&bqclean);
2258
2259 	bd_speedup();	/* heeeelp */
2260 	if ((gbflags & GB_NOWAIT_BD) != 0)
2261 		return;
2262
2263 	td = curthread;
2264 	rw_wlock(&nblock);
2265 	while ((needsbuffer & flags) != 0) {
2266 		if (vp != NULL && vp->v_type != VCHR &&
2267 		    (td->td_pflags & TDP_BUFNEED) == 0) {
2268 			rw_wunlock(&nblock);
2269 			/*
2270 			 * getblk() is called with a vnode locked, and
2271 			 * some majority of the dirty buffers may as
2272 			 * well belong to the vnode.  Flushing the
2273 			 * buffers there would make progress that
2274 			 * cannot be achieved by the buf_daemon, which
2275 			 * cannot lock the vnode.
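			 *
			 * On the flag juggling that follows:
			 * TDP_BUFNEED and TDP_NORUNNINGBUF are set
			 * while this thread plays bufdaemon, and the
			 * "norunbuf" mask computed first clears both
			 * again afterwards while preserving the
			 * caller's original TDP_NORUNNINGBUF setting.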
2276 			 */
2277 			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
2278 			    (td->td_pflags & TDP_NORUNNINGBUF);
2279
2280 			/*
2281 			 * Play bufdaemon.  The getnewbuf() function
2282 			 * may be called while the thread owns lock
2283 			 * for another dirty buffer for the same
2284 			 * vnode, which makes it impossible to use
2285 			 * VOP_FSYNC() there, due to the buffer lock
2286 			 * recursion.
2287 			 */
2288 			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
2289 			fl = buf_flush(vp, flushbufqtarget);
2290 			td->td_pflags &= norunbuf;
2291 			rw_wlock(&nblock);
2292 			if (fl != 0)
2293 				continue;
2294 			if ((needsbuffer & flags) == 0)
2295 				break;
2296 		}
2297 		error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
2298 		    (PRIBIO + 4) | slpflag, waitmsg, slptimeo);
2299 		if (error != 0)
2300 			break;
2301 	}
2302 	rw_wunlock(&nblock);
2303 }
2304
2305 static void
2306 getnewbuf_reuse_bp(struct buf *bp, int qindex)
2307 {
2308
2309 	CTR6(KTR_BUF, "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
2310 	    "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
2311 	    bp->b_kvasize, bp->b_bufsize, qindex);
2312 	mtx_assert(&bqclean, MA_NOTOWNED);
2313
2314 	/*
2315 	 * Note: we no longer distinguish between VMIO and non-VMIO
2316 	 * buffers.
2317 	 */
2318 	KASSERT((bp->b_flags & B_DELWRI) == 0,
2319 	    ("delwri buffer %p found in queue %d", bp, qindex));
2320
2321 	if (qindex == QUEUE_CLEAN) {
2322 		if (bp->b_flags & B_VMIO) {
2323 			bp->b_flags &= ~B_ASYNC;
2324 			vfs_vmio_release(bp);
2325 		}
2326 		if (bp->b_vp != NULL)
2327 			brelvp(bp);
2328 	}
2329
2330 	/*
2331 	 * Get the rest of the buffer freed up.  b_kva* is still valid
2332 	 * after this operation.
2333 	 */
2334
2335 	if (bp->b_rcred != NOCRED) {
2336 		crfree(bp->b_rcred);
2337 		bp->b_rcred = NOCRED;
2338 	}
2339 	if (bp->b_wcred != NOCRED) {
2340 		crfree(bp->b_wcred);
2341 		bp->b_wcred = NOCRED;
2342 	}
2343 	if (!LIST_EMPTY(&bp->b_dep))
2344 		buf_deallocate(bp);
2345 	if (bp->b_vflags & BV_BKGRDINPROG)
2346 		panic("losing buffer 3");
2347 	KASSERT(bp->b_vp == NULL, ("bp: %p still has vnode %p.  qindex: %d",
2348 	    bp, bp->b_vp, qindex));
2349 	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
2350 	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
2351
2352 	if (bp->b_bufsize)
2353 		allocbuf(bp, 0);
2354
2355 	bp->b_flags = 0;
2356 	bp->b_ioflags = 0;
2357 	bp->b_xflags = 0;
2358 	KASSERT((bp->b_flags & B_INFREECNT) == 0,
2359 	    ("buf %p still counted as free?", bp));
2360 	bp->b_vflags = 0;
2361 	bp->b_vp = NULL;
2362 	bp->b_blkno = bp->b_lblkno = 0;
2363 	bp->b_offset = NOOFFSET;
2364 	bp->b_iodone = 0;
2365 	bp->b_error = 0;
2366 	bp->b_resid = 0;
2367 	bp->b_bcount = 0;
2368 	bp->b_npages = 0;
2369 	bp->b_dirtyoff = bp->b_dirtyend = 0;
2370 	bp->b_bufobj = NULL;
2371 	bp->b_pin_count = 0;
2372 	bp->b_data = bp->b_kvabase;
2373 	bp->b_fsprivate1 = NULL;
2374 	bp->b_fsprivate2 = NULL;
2375 	bp->b_fsprivate3 = NULL;
2376
2377 	LIST_INIT(&bp->b_dep);
2378 }
2379
2380 static struct buf *
2381 getnewbuf_scan(int maxsize, int defrag, int unmapped, int metadata)
2382 {
2383 	struct buf *bp, *nbp;
2384 	int nqindex, qindex, pass;
2385
2386 	KASSERT(!unmapped || !defrag, ("both unmapped and defrag"));
2387
2388 	pass = 0;
2389 restart:
2390 	if (pass != 0)
2391 		atomic_add_int(&getnewbufrestarts, 1);
2392
2393 	nbp = NULL;
2394 	mtx_lock(&bqclean);
2395 	/*
2396 	 * If we're not defragging or low on bufspace, attempt to make a new
2397 	 * buf from a header.
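	 * Failing that, fall back to scanning QUEUE_CLEAN for a buffer to
	 * recycle; for metadata requests a second pass over QUEUE_EMPTY is
	 * attempted (see the switch in the loop below) before giving up.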
2398 	 */
2399 	if (defrag == 0 && bufspace + maxsize < hibufspace) {
2400 		nqindex = QUEUE_EMPTY;
2401 		nbp = TAILQ_FIRST(&bufqueues[nqindex]);
2402 	}
2403 	/*
2404 	 * All available buffers might be clean or we need to start recycling.
2405 	 */
2406 	if (nbp == NULL) {
2407 		nqindex = QUEUE_CLEAN;
2408 		nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
2409 	}
2410
2411 	/*
2412 	 * Run the scan, possibly freeing data and/or kva mappings on the
2413 	 * fly as needed.
2414 	 */
2415 	while ((bp = nbp) != NULL) {
2416 		qindex = nqindex;
2417
2418 		/*
2419 		 * Calculate next bp (we can only use it if we do not
2420 		 * release the bqlock).
2421 		 */
2422 		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
2423 			switch (qindex) {
2424 			case QUEUE_EMPTY:
2425 				nqindex = QUEUE_CLEAN;
2426 				nbp = TAILQ_FIRST(&bufqueues[nqindex]);
2427 				if (nbp != NULL)
2428 					break;
2429 				/* FALLTHROUGH */
2430 			case QUEUE_CLEAN:
2431 				if (metadata && pass == 0) {
2432 					pass = 1;
2433 					nqindex = QUEUE_EMPTY;
2434 					nbp = TAILQ_FIRST(&bufqueues[nqindex]);
2435 				}
2436 				/*
2437 				 * nbp is NULL.
2438 				 */
2439 				break;
2440 			}
2441 		}
2442 		/*
2443 		 * If we are defragging then we need a buffer with
2444 		 * b_kvasize != 0.  This situation occurs when we
2445 		 * have many unmapped bufs.
2446 		 */
2447 		if (defrag && bp->b_kvasize == 0)
2448 			continue;
2449
2450 		/*
2451 		 * Start freeing the bp.  This is somewhat involved.  nbp
2452 		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
2453 		 */
2454 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2455 			continue;
2456 		/*
2457 		 * BKGRDINPROG can only be set with the buf and bufobj
2458 		 * locks both held.  We tolerate a race to clear it here.
2459 		 */
2460 		if (bp->b_vflags & BV_BKGRDINPROG) {
2461 			BUF_UNLOCK(bp);
2462 			continue;
2463 		}
2464
2465 		/*
2466 		 * Requeue the background write buffer with error.
2467 		 */
2468 		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
2469 			bremfreel(bp);
2470 			mtx_unlock(&bqclean);
2471 			bqrelse(bp);
2472 			continue;
2473 		}
2474
2475 		KASSERT(bp->b_qindex == qindex,
2476 		    ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
2477
2478 		bremfreel(bp);
2479 		mtx_unlock(&bqclean);
2480
2481 		/*
2482 		 * NOTE:  nbp is now entirely invalid.  We can only restart
2483 		 * the scan from this point on.
2484 		 */
2485 		getnewbuf_reuse_bp(bp, qindex);
2486 		mtx_assert(&bqclean, MA_NOTOWNED);
2487
2488 		/*
2489 		 * If we are defragging then free the buffer.
2490 		 */
2491 		if (defrag) {
2492 			bp->b_flags |= B_INVAL;
2493 			brelse(bp);
2494 			defrag = 0;
2495 			goto restart;
2496 		}
2497
2498 		/*
2499 		 * Notify any waiters for the buffer lock about
2500 		 * identity change by freeing the buffer.
2501 		 */
2502 		if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
2503 			bp->b_flags |= B_INVAL;
2504 			brelse(bp);
2505 			goto restart;
2506 		}
2507
2508 		if (metadata)
2509 			break;
2510
2511 		/*
2512 		 * If we are overcommitted then recover the buffer and its
2513 		 * KVM space.  This occurs in rare situations when multiple
2514 		 * processes are blocked in getnewbuf() or allocbuf().
2515 		 */
2516 		if (bufspace >= hibufspace && bp->b_kvasize != 0) {
2517 			bp->b_flags |= B_INVAL;
2518 			brelse(bp);
2519 			goto restart;
2520 		}
2521 		break;
2522 	}
2523 	return (bp);
2524 }
2525
2526 /*
2527  * getnewbuf:
2528  *
2529  *	Find and initialize a new buffer header, freeing up existing buffers
2530  *	in the bufqueues as necessary.  The new buffer is returned locked.
2531  *
2532  *	Important:  B_INVAL is not set.  If the caller wishes to throw the
2533  *	buffer away, the caller must set B_INVAL prior to calling brelse().
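 *
 *	An illustrative sketch of the discard side of that contract
 *	("do_not_want_it" is a hypothetical condition):
 *
 *		bp = getnewbuf(vp, 0, 0, size, maxsize, 0);
 *		if (bp != NULL && do_not_want_it) {
 *			bp->b_flags |= B_INVAL;
 *			brelse(bp);
 *		}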
2534  *
2535  *	We block if:
2536  *		We have insufficient buffer headers
2537  *		We have insufficient buffer space
2538  *		buffer_arena is too fragmented ( space reservation fails )
2539  *		If we have to flush dirty buffers ( but we try to avoid this )
2540  */
2541 static struct buf *
2542 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int size, int maxsize,
2543     int gbflags)
2544 {
2545 	struct buf *bp;
2546 	int defrag, metadata;
2547
2548 	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
2549 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
2550 	if (!unmapped_buf_allowed)
2551 		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
2552
2553 	defrag = 0;
2554 	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
2555 	    vp->v_type == VCHR)
2556 		metadata = 1;
2557 	else
2558 		metadata = 0;
2559 	/*
2560 	 * We can't afford to block since we might be holding a vnode lock,
2561 	 * which may prevent system daemons from running.  We deal with
2562 	 * low-memory situations by proactively returning memory and running
2563 	 * async I/O rather than sync I/O.
2564 	 */
2565 	atomic_add_int(&getnewbufcalls, 1);
2566 restart:
2567 	bp = getnewbuf_scan(maxsize, defrag, (gbflags & (GB_UNMAPPED |
2568 	    GB_KVAALLOC)) == GB_UNMAPPED, metadata);
2569 	if (bp != NULL)
2570 		defrag = 0;
2571
2572 	/*
2573 	 * If we exhausted our list, sleep as appropriate.  We may have to
2574 	 * wakeup various daemons and write out some dirty buffers.
2575 	 *
2576 	 * Generally we are sleeping due to insufficient buffer space.
2577 	 */
2578 	if (bp == NULL) {
2579 		mtx_assert(&bqclean, MA_OWNED);
2580 		getnewbuf_bufd_help(vp, gbflags, slpflag, slptimeo, defrag);
2581 		mtx_assert(&bqclean, MA_NOTOWNED);
2582 	} else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) == GB_UNMAPPED) {
2583 		mtx_assert(&bqclean, MA_NOTOWNED);
2584
2585 		bufkvafree(bp);
2586 		atomic_add_int(&bufreusecnt, 1);
2587 	} else {
2588 		mtx_assert(&bqclean, MA_NOTOWNED);
2589
2590 		/*
2591 		 * We finally have a valid bp.  We aren't quite out of the
2592 		 * woods, we still have to reserve kva space.  In order to
2593 		 * keep fragmentation sane we only allocate kva in BKVASIZE
2594 		 * chunks.
2595 		 */
2596 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2597
2598 		if (maxsize != bp->b_kvasize &&
2599 		    bufkvaalloc(bp, maxsize, gbflags)) {
2600 			defrag = 1;
2601 			bp->b_flags |= B_INVAL;
2602 			brelse(bp);
2603 			goto restart;
2604 		} else if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) ==
2605 		    (GB_UNMAPPED | GB_KVAALLOC)) {
2606 			bp->b_data = unmapped_buf;
2607 			BUF_CHECK_UNMAPPED(bp);
2608 		}
2609 		atomic_add_int(&bufreusecnt, 1);
2610 	}
2611 	return (bp);
2612 }
2613
2614 /*
2615  * buf_daemon:
2616  *
2617  *	buffer flushing daemon.  Buffers are normally flushed by the
2618  *	update daemon but if it cannot keep up this process starts to
2619  *	take the load in an attempt to prevent getnewbuf() from blocking.
2620  */
2621
2622 static struct kproc_desc buf_kp = {
2623 	"bufdaemon",
2624 	buf_daemon,
2625 	&bufdaemonproc
2626 };
2627 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2628
2629 static int
2630 buf_flush(struct vnode *vp, int target)
2631 {
2632 	int flushed;
2633
2634 	flushed = flushbufqueues(vp, target, 0);
2635 	if (flushed == 0) {
2636 		/*
2637 		 * Could not find any buffers without rollback
2638 		 * dependencies, so just write the first one
2639 		 * in the hopes of eventually making progress.
2640 		 */
2641 		if (vp != NULL && target > 2)
2642 			target /= 2;
2643 		flushbufqueues(vp, target, 1);
2644 	}
2645 	return (flushed);
2646 }
2647
2648 static void
2649 buf_daemon()
2650 {
2651 	int lodirty;
2652
2653 	/*
2654 	 * This process needs to be suspended prior to shutdown sync.
2655 	 */
2656 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
2657 	    SHUTDOWN_PRI_LAST);
2658
2659 	/*
2660 	 * This process is allowed to take the buffer cache to the limit.
2661 	 */
2662 	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
2663 	mtx_lock(&bdlock);
2664 	for (;;) {
2665 		bd_request = 0;
2666 		mtx_unlock(&bdlock);
2667
2668 		kproc_suspend_check(bufdaemonproc);
2669 		lodirty = lodirtybuffers;
2670 		if (bd_speedupreq) {
2671 			lodirty = numdirtybuffers / 2;
2672 			bd_speedupreq = 0;
2673 		}
2674 		/*
2675 		 * Do the flush.  Limit the amount of in-transit I/O we
2676 		 * allow to build up, otherwise we would completely saturate
2677 		 * the I/O system.
2678 		 */
2679 		while (numdirtybuffers > lodirty) {
2680 			if (buf_flush(NULL, numdirtybuffers - lodirty) == 0)
2681 				break;
2682 			kern_yield(PRI_USER);
2683 		}
2684
2685 		/*
2686 		 * Only clear bd_request if we have reached our low water
2687 		 * mark.  The buf_daemon normally waits 1 second and
2688 		 * then incrementally flushes any dirty buffers that have
2689 		 * built up, within reason.
2690 		 *
2691 		 * If we were unable to hit our low water mark and couldn't
2692 		 * find any flushable buffers, we sleep for a short period
2693 		 * to avoid endless loops on unlockable buffers.
2694 		 */
2695 		mtx_lock(&bdlock);
2696 		if (numdirtybuffers <= lodirtybuffers) {
2697 			/*
2698 			 * We reached our low water mark, reset the
2699 			 * request and sleep until we are needed again.
2700 			 * The sleep is just so the suspend code works.
2701 			 */
2702 			bd_request = 0;
2703 			/*
2704 			 * Do an extra wakeup in case dirty threshold
2705 			 * changed via sysctl and the explicit transition
2706 			 * out of shortfall was missed.
2707 			 */
2708 			bdirtywakeup();
2709 			if (runningbufspace <= lorunningspace)
2710 				runningwakeup();
2711 			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
2712 		} else {
2713 			/*
2714 			 * We couldn't find any flushable dirty buffers but
2715 			 * still have too many dirty buffers, so we
2716 			 * have to sleep and try again. (rare)
2717 			 */
2718 			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
2719 		}
2720 	}
2721 }
2722
2723 /*
2724  * flushbufqueues:
2725  *
2726  *	Try to flush a buffer in the dirty queue.  We must be careful to
2727  *	free up B_INVAL buffers instead of writing them, which NFS is
2728  *	particularly sensitive to.
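 *
 *	The scan below walks QUEUE_DIRTY behind a private sentinel buf so
 *	the queue lock can be dropped while each candidate is examined;
 *	the sentinel is re-inserted after the candidate before the lock is
 *	released, so the scan position survives concurrent queue activity.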
2729  */
2730 static int flushwithdeps = 0;
2731 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
2732     0, "Number of buffers flushed with dependencies that require rollbacks");
2733
2734 static int
2735 flushbufqueues(struct vnode *lvp, int target, int flushdeps)
2736 {
2737 	struct buf *sentinel;
2738 	struct vnode *vp;
2739 	struct mount *mp;
2740 	struct buf *bp;
2741 	int hasdeps;
2742 	int flushed;
2743 	int queue;
2744 	int error;
2745 	bool unlock;
2746
2747 	flushed = 0;
2748 	queue = QUEUE_DIRTY;
2749 	bp = NULL;
2750 	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
2751 	sentinel->b_qindex = QUEUE_SENTINEL;
2752 	mtx_lock(&bqdirty);
2753 	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
2754 	mtx_unlock(&bqdirty);
2755 	while (flushed != target) {
2756 		maybe_yield();
2757 		mtx_lock(&bqdirty);
2758 		bp = TAILQ_NEXT(sentinel, b_freelist);
2759 		if (bp != NULL) {
2760 			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2761 			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
2762 			    b_freelist);
2763 		} else {
2764 			mtx_unlock(&bqdirty);
2765 			break;
2766 		}
2767 		/*
2768 		 * Skip sentinels inserted by other invocations of
2769 		 * flushbufqueues(), taking care to not reorder them.
2770 		 *
2771 		 * Only flush the buffers that belong to the
2772 		 * vnode locked by the curthread.
2773 		 */
2774 		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
2775 		    bp->b_vp != lvp)) {
2776 			mtx_unlock(&bqdirty);
2777 			continue;
2778 		}
2779 		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
2780 		mtx_unlock(&bqdirty);
2781 		if (error != 0)
2782 			continue;
2783 		if (bp->b_pin_count > 0) {
2784 			BUF_UNLOCK(bp);
2785 			continue;
2786 		}
2787 		/*
2788 		 * BKGRDINPROG can only be set with the buf and bufobj
2789 		 * locks both held.  We tolerate a race to clear it here.
2790 		 */
2791 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
2792 		    (bp->b_flags & B_DELWRI) == 0) {
2793 			BUF_UNLOCK(bp);
2794 			continue;
2795 		}
2796 		if (bp->b_flags & B_INVAL) {
2797 			bremfreef(bp);
2798 			brelse(bp);
2799 			flushed++;
2800 			continue;
2801 		}
2802
2803 		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
2804 			if (flushdeps == 0) {
2805 				BUF_UNLOCK(bp);
2806 				continue;
2807 			}
2808 			hasdeps = 1;
2809 		} else
2810 			hasdeps = 0;
2811 		/*
2812 		 * We must hold the lock on a vnode before writing
2813 		 * one of its buffers.  Otherwise we may confuse, or
2814 		 * in the case of a snapshot vnode, deadlock the
2815 		 * system.
2816 		 *
2817 		 * The lock order here is the reverse of the normal
2818 		 * order of vnode lock followed by buf lock.  This is
2819 		 * ok because the NOWAIT will prevent deadlock.
2820 		 */
2821 		vp = bp->b_vp;
2822 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2823 			BUF_UNLOCK(bp);
2824 			continue;
2825 		}
2826 		if (lvp == NULL) {
2827 			unlock = true;
2828 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
2829 		} else {
2830 			ASSERT_VOP_LOCKED(vp, "getbuf");
2831 			unlock = false;
2832 			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
2833 			    vn_lock(vp, LK_TRYUPGRADE);
2834 		}
2835 		if (error == 0) {
2836 			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
2837 			    bp, bp->b_vp, bp->b_flags);
2838 			if (curproc == bufdaemonproc) {
2839 				vfs_bio_awrite(bp);
2840 			} else {
2841 				bremfree(bp);
2842 				bwrite(bp);
2843 				notbufdflushes++;
2844 			}
2845 			vn_finished_write(mp);
2846 			if (unlock)
2847 				VOP_UNLOCK(vp, 0);
2848 			flushwithdeps += hasdeps;
2849 			flushed++;
2850
2851 			/*
2852 			 * Sleeping on runningbufspace while holding
2853 			 * vnode lock leads to deadlock.
2854 			 */
2855 			if (curproc == bufdaemonproc &&
2856 			    runningbufspace > hirunningspace)
2857 				waitrunningbufspace();
2858 			continue;
2859 		}
2860 		vn_finished_write(mp);
2861 		BUF_UNLOCK(bp);
2862 	}
2863 	mtx_lock(&bqdirty);
2864 	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
2865 	mtx_unlock(&bqdirty);
2866 	free(sentinel, M_TEMP);
2867 	return (flushed);
2868 }
2869
2870 /*
2871  * Check to see if a block is currently memory resident.
2872  */
2873 struct buf *
2874 incore(struct bufobj *bo, daddr_t blkno)
2875 {
2876 	struct buf *bp;
2877
2878 	BO_RLOCK(bo);
2879 	bp = gbincore(bo, blkno);
2880 	BO_RUNLOCK(bo);
2881 	return (bp);
2882 }
2883
2884 /*
2885  * Returns true if no I/O is needed to access the
2886  * associated VM object.  This is like incore except
2887  * it also hunts around in the VM system for the data.
2888  */
2889
2890 static int
2891 inmem(struct vnode * vp, daddr_t blkno)
2892 {
2893 	vm_object_t obj;
2894 	vm_offset_t toff, tinc, size;
2895 	vm_page_t m;
2896 	vm_ooffset_t off;
2897
2898 	ASSERT_VOP_LOCKED(vp, "inmem");
2899
2900 	if (incore(&vp->v_bufobj, blkno))
2901 		return 1;
2902 	if (vp->v_mount == NULL)
2903 		return 0;
2904 	obj = vp->v_object;
2905 	if (obj == NULL)
2906 		return (0);
2907
2908 	size = PAGE_SIZE;
2909 	if (size > vp->v_mount->mnt_stat.f_iosize)
2910 		size = vp->v_mount->mnt_stat.f_iosize;
2911 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2912
2913 	VM_OBJECT_RLOCK(obj);
2914 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2915 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2916 		if (!m)
2917 			goto notinmem;
2918 		tinc = size;
2919 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2920 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2921 		if (vm_page_is_valid(m,
2922 		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2923 			goto notinmem;
2924 	}
2925 	VM_OBJECT_RUNLOCK(obj);
2926 	return 1;
2927
2928 notinmem:
2929 	VM_OBJECT_RUNLOCK(obj);
2930 	return (0);
2931 }
2932
2933 /*
2934  * Set the dirty range for a buffer based on the status of the dirty
2935  * bits in the pages comprising the buffer.  The range is limited
2936  * to the size of the buffer.
2937  *
2938  * Tell the VM system that the pages associated with this buffer
2939  * are clean.  This is used for delayed writes where the data is
2940  * going to go to disk eventually without additional VM intervention.
2941  *
2942  * Note that while we only really need to clean through to b_bcount, we
2943  * just go ahead and clean through to b_bufsize.
2944  */
2945 static void
2946 vfs_clean_pages_dirty_buf(struct buf *bp)
2947 {
2948 	vm_ooffset_t foff, noff, eoff;
2949 	vm_page_t m;
2950 	int i;
2951
2952 	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
2953 		return;
2954
2955 	foff = bp->b_offset;
2956 	KASSERT(bp->b_offset != NOOFFSET,
2957 	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
2958
2959 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
2960 	vfs_drain_busy_pages(bp);
2961 	vfs_setdirty_locked_object(bp);
2962 	for (i = 0; i < bp->b_npages; i++) {
2963 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2964 		eoff = noff;
2965 		if (eoff > bp->b_offset + bp->b_bufsize)
2966 			eoff = bp->b_offset + bp->b_bufsize;
2967 		m = bp->b_pages[i];
2968 		vfs_page_set_validclean(bp, foff, m);
2969 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2970 		foff = noff;
2971 	}
2972 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
2973 }
2974
2975 static void
2976 vfs_setdirty_locked_object(struct buf *bp)
2977 {
2978 	vm_object_t object;
2979 	int i;
2980
2981 	object = bp->b_bufobj->bo_object;
2982 	VM_OBJECT_ASSERT_WLOCKED(object);
2983
2984 	/*
2985 	 * We qualify the scan for modified pages on whether the
2986 	 * object has been flushed yet.
2987 	 */
2988 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
2989 		vm_offset_t boffset;
2990 		vm_offset_t eoffset;
2991
2992 		/*
2993 		 * Test the pages to see if they have been modified directly
2994 		 * by users through the VM system.
2995 		 */
2996 		for (i = 0; i < bp->b_npages; i++)
2997 			vm_page_test_dirty(bp->b_pages[i]);
2998
2999 		/*
3000 		 * Calculate the encompassing dirty range, boffset and eoffset,
3001 		 * (eoffset - boffset) bytes.
3002 		 */
3003
3004 		for (i = 0; i < bp->b_npages; i++) {
3005 			if (bp->b_pages[i]->dirty)
3006 				break;
3007 		}
3008 		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3009
3010 		for (i = bp->b_npages - 1; i >= 0; --i) {
3011 			if (bp->b_pages[i]->dirty) {
3012 				break;
3013 			}
3014 		}
3015 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3016
3017 		/*
3018 		 * Fit it to the buffer.
3019 		 */
3020
3021 		if (eoffset > bp->b_bcount)
3022 			eoffset = bp->b_bcount;
3023
3024 		/*
3025 		 * If we have a good dirty range, merge with the existing
3026 		 * dirty range.
3027 		 */
3028
3029 		if (boffset < eoffset) {
3030 			if (bp->b_dirtyoff > boffset)
3031 				bp->b_dirtyoff = boffset;
3032 			if (bp->b_dirtyend < eoffset)
3033 				bp->b_dirtyend = eoffset;
3034 		}
3035 	}
3036 }
3037
3038 /*
3039  * Allocate the KVA mapping for an existing buffer.
3040  * If an unmapped buffer is provided but a mapped buffer is requested,
3041  * also take care to properly set up the mappings between pages and KVA.
3042  */
3043 static void
3044 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3045 {
3046 	struct buf *scratch_bp;
3047 	int bsize, maxsize, need_mapping, need_kva;
3048 	off_t offset;
3049
3050 	need_mapping = bp->b_data == unmapped_buf &&
3051 	    (gbflags & GB_UNMAPPED) == 0;
3052 	need_kva = bp->b_kvabase == unmapped_buf &&
3053 	    bp->b_data == unmapped_buf &&
3054 	    (gbflags & GB_KVAALLOC) != 0;
3055 	if (!need_mapping && !need_kva)
3056 		return;
3057
3058 	BUF_CHECK_UNMAPPED(bp);
3059
3060 	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3061 		/*
3062 		 * Buffer is not mapped, but the KVA was already
3063 		 * reserved at the time of the instantiation.  Use the
3064 		 * allocated space.
3065 		 */
3066 		goto has_addr;
3067 	}
3068
3069 	/*
3070 	 * Calculate the amount of the address space we would reserve
3071 	 * if the buffer was mapped.
3072 	 */
3073 	bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3074 	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3075 	offset = blkno * bsize;
3076 	maxsize = size + (offset & PAGE_MASK);
3077 	maxsize = imax(maxsize, bsize);
3078
3079 mapping_loop:
3080 	if (bufkvaalloc(bp, maxsize, gbflags)) {
3081 		/*
3082 		 * Request defragmentation.  getnewbuf() returns us the
3083 		 * allocated space by the scratch buffer KVA.
3084 		 */
3085 		scratch_bp = getnewbuf(bp->b_vp, 0, 0, size, maxsize, gbflags |
3086 		    (GB_UNMAPPED | GB_KVAALLOC));
3087 		if (scratch_bp == NULL) {
3088 			if ((gbflags & GB_NOWAIT_BD) != 0) {
3089 				/*
3090 				 * XXXKIB: defragmentation cannot
3091 				 * succeed, not sure what else to do.
3092 				 */
3093 				panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3094 			}
3095 			atomic_add_int(&mappingrestarts, 1);
3096 			goto mapping_loop;
3097 		}
3098 		KASSERT(scratch_bp->b_kvabase != unmapped_buf,
3099 		    ("scratch bp has no KVA %p", scratch_bp));
3100 		/* Grab pointers. */
3101 		bp->b_kvabase = scratch_bp->b_kvabase;
3102 		bp->b_kvasize = scratch_bp->b_kvasize;
3103 		bp->b_data = scratch_bp->b_data;
3104
3105 		/* Get rid of the scratch buffer. */
3106 		scratch_bp->b_kvasize = 0;
3107 		scratch_bp->b_flags |= B_INVAL;
3108 		scratch_bp->b_data = scratch_bp->b_kvabase = unmapped_buf;
3109 		brelse(scratch_bp);
3110 	}
3111 has_addr:
3112 	if (need_mapping) {
3113 		/* b_offset is handled by bpmap_qenter. */
3114 		bp->b_data = bp->b_kvabase;
3115 		BUF_CHECK_MAPPED(bp);
3116 		bpmap_qenter(bp);
3117 	}
3118 }
3119
3120 /*
3121  * getblk:
3122  *
3123  *	Get a block given a specified block and offset into a file/device.
3124  *	The buffer's B_DONE bit will be cleared on return, making it almost
3125  *	ready for an I/O initiation.  B_INVAL may or may not be set on
3126  *	return.  The caller should clear B_INVAL prior to initiating a
3127  *	READ.
3128  *
3129  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3130  *	an existing buffer.
3131  *
3132  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3133  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3134  *	and then cleared based on the backing VM.  If the previous buffer is
3135  *	non-0-sized but invalid, B_CACHE will be cleared.
3136  *
3137  *	If getblk() must create a new buffer, the new buffer is returned with
3138  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3139  *	case it is returned with B_INVAL clear and B_CACHE set based on the
3140  *	backing VM.
3141  *
3142  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3143  *	B_CACHE bit is clear.
3144  *
3145  *	What this means, basically, is that the caller should use B_CACHE to
3146  *	determine whether the buffer is fully valid or not and should clear
3147  *	B_INVAL prior to issuing a read.  If the caller intends to validate
3148  *	the buffer by loading its data area with something, the caller needs
3149  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3150  *	the caller should set B_CACHE ( as an optimization ), else the caller
3151  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3152  *	a write attempt or if it was a successful read.  If the caller
3153  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3154  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
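 *
 *	An illustrative sketch of the classic read pattern built on this
 *	contract (roughly what bread() does, with error handling and
 *	credentials omitted):
 *
 *		bp = getblk(vp, blkno, size, 0, 0, 0);
 *		if ((bp->b_flags & B_CACHE) == 0) {
 *			bp->b_iocmd = BIO_READ;
 *			bp->b_flags &= ~B_INVAL;
 *			bp->b_ioflags &= ~BIO_ERROR;
 *			vfs_busy_pages(bp, 0);
 *			bstrategy(bp);
 *			error = bufwait(bp);
 *		}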
3155  */
3156 struct buf *
3157 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3158     int flags)
3159 {
3160 	struct buf *bp;
3161 	struct bufobj *bo;
3162 	int bsize, error, maxsize, vmio;
3163 	off_t offset;
3164
3165 	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3166 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3167 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3168 	ASSERT_VOP_LOCKED(vp, "getblk");
3169 	if (size > MAXBCACHEBUF)
3170 		panic("getblk: size(%d) > MAXBCACHEBUF(%d)\n", size,
3171 		    MAXBCACHEBUF);
3172 	if (!unmapped_buf_allowed)
3173 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3174
3175 	bo = &vp->v_bufobj;
3176 loop:
3177 	BO_RLOCK(bo);
3178 	bp = gbincore(bo, blkno);
3179 	if (bp != NULL) {
3180 		int lockflags;
3181 		/*
3182 		 * Buffer is in-core.  If the buffer is not busy nor managed,
3183 		 * it must be on a queue.
3184 		 */
3185 		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
3186
3187 		if (flags & GB_LOCK_NOWAIT)
3188 			lockflags |= LK_NOWAIT;
3189
3190 		error = BUF_TIMELOCK(bp, lockflags,
3191 		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
3192
3193 		/*
3194 		 * If we slept and got the lock we have to restart in case
3195 		 * the buffer changed identities.
3196 		 */
3197 		if (error == ENOLCK)
3198 			goto loop;
3199 		/* We timed out or were interrupted. */
3200 		else if (error)
3201 			return (NULL);
3202 		/* If recursed, assume caller knows the rules. */
3203 		else if (BUF_LOCKRECURSED(bp))
3204 			goto end;
3205
3206 		/*
3207 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3208 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
3209 		 * and for a VMIO buffer B_CACHE is adjusted according to the
3210 		 * backing VM cache.
3211 		 */
3212 		if (bp->b_flags & B_INVAL)
3213 			bp->b_flags &= ~B_CACHE;
3214 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3215 			bp->b_flags |= B_CACHE;
3216 		if (bp->b_flags & B_MANAGED)
3217 			MPASS(bp->b_qindex == QUEUE_NONE);
3218 		else
3219 			bremfree(bp);
3220
3221 		/*
3222 		 * Check for size inconsistencies for the non-VMIO case.
3223 		 */
3224 		if (bp->b_bcount != size) {
3225 			if ((bp->b_flags & B_VMIO) == 0 ||
3226 			    (size > bp->b_kvasize)) {
3227 				if (bp->b_flags & B_DELWRI) {
3228 					/*
3229 					 * If buffer is pinned and caller does
3230 					 * not want to sleep waiting for it to
3231 					 * be unpinned, bail out.
3232 					 */
3233 					if (bp->b_pin_count > 0) {
3234 						if (flags & GB_LOCK_NOWAIT) {
3235 							bqrelse(bp);
3236 							return (NULL);
3237 						} else {
3238 							bunpin_wait(bp);
3239 						}
3240 					}
3241 					bp->b_flags |= B_NOCACHE;
3242 					bwrite(bp);
3243 				} else {
3244 					if (LIST_EMPTY(&bp->b_dep)) {
3245 						bp->b_flags |= B_RELBUF;
3246 						brelse(bp);
3247 					} else {
3248 						bp->b_flags |= B_NOCACHE;
3249 						bwrite(bp);
3250 					}
3251 				}
3252 				goto loop;
3253 			}
3254 		}
3255
3256 		/*
3257 		 * Handle the case of unmapped buffer which should
3258 		 * become mapped, or the buffer for which KVA
3259 		 * reservation is requested.
3260 		 */
3261 		bp_unmapped_get_kva(bp, blkno, size, flags);

3262
3263 		/*
3264 		 * If the size is inconsistent in the VMIO case, we can resize
3265 		 * the buffer.  This might lead to B_CACHE getting set or
3266 		 * cleared.  If the size has not changed, B_CACHE remains
3267 		 * unchanged from its previous state.
3268 		 */
3269 		if (bp->b_bcount != size)
3270 			allocbuf(bp, size);
3271
3272 		KASSERT(bp->b_offset != NOOFFSET,
3273 		    ("getblk: no buffer offset"));
3274
3275 		/*
3276 		 * A buffer with B_DELWRI set and B_CACHE clear must
3277 		 * be committed before we can return the buffer in
3278 		 * order to prevent the caller from issuing a read
3279 		 * ( due to B_CACHE not being set ) and overwriting
3280 		 * it.
3281 		 *
3282 		 * Most callers, including NFS and FFS, need this to
3283 		 * operate properly either because they assume they
3284 		 * can issue a read if B_CACHE is not set, or because
3285 		 * ( for example ) an uncached B_DELWRI might loop due
3286 		 * to softupdates re-dirtying the buffer.  In the latter
3287 		 * case, B_CACHE is set after the first write completes,
3288 		 * preventing further loops.
3289 		 *
3290 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
3291 		 * above while extending the buffer, we cannot allow the
3292 		 * buffer to remain with B_CACHE set after the write
3293 		 * completes or it will represent a corrupt state.  To
3294 		 * deal with this we set B_NOCACHE to scrap the buffer
3295 		 * after the write.
3296 		 *
3297 		 * We might be able to do something fancy, like setting
3298 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
3299 		 * so the below call doesn't set B_CACHE, but that gets real
3300 		 * confusing.  This is much easier.
3301 		 */
3302
3303 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3304 			bp->b_flags |= B_NOCACHE;
3305 			bwrite(bp);
3306 			goto loop;
3307 		}
3308 		bp->b_flags &= ~B_DONE;
3309 	} else {
3310 		/*
3311 		 * Buffer is not in-core, create new buffer.  The buffer
3312 		 * returned by getnewbuf() is locked.  Note that the returned
3313 		 * buffer is also considered valid (not marked B_INVAL).
3314 		 */
3315 		BO_RUNLOCK(bo);
3316 		/*
3317 		 * If the user does not want us to create the buffer, bail out
3318 		 * here.
3319 		 */
3320 		if (flags & GB_NOCREAT)
3321 			return NULL;
3322 		if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread))
3323 			return NULL;
3324
3325 		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
3326 		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3327 		offset = blkno * bsize;
3328 		vmio = vp->v_object != NULL;
3329 		if (vmio) {
3330 			maxsize = size + (offset & PAGE_MASK);
3331 		} else {
3332 			maxsize = size;
3333 			/* Do not allow non-VMIO notmapped buffers. */
3334 			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3335 		}
3336 		maxsize = imax(maxsize, bsize);
3337
3338 		bp = getnewbuf(vp, slpflag, slptimeo, size, maxsize, flags);
3339 		if (bp == NULL) {
3340 			if (slpflag || slptimeo)
3341 				return NULL;
3342 			goto loop;
3343 		}
3344
3345 		/*
3346 		 * This code is used to make sure that a buffer is not
3347 		 * created while the getnewbuf routine is blocked.
3348 		 * This can be a problem whether the vnode is locked or not.
3349 		 * If the buffer is created out from under us, we have to
3350 		 * throw away the one we just created.
3351 		 *
3352 		 * Note: this must occur before we associate the buffer
3353 		 * with the vp especially considering limitations in
3354 		 * the splay tree implementation when dealing with duplicate
3355 		 * lblkno's.
3356 		 */
3357 		BO_LOCK(bo);
3358 		if (gbincore(bo, blkno)) {
3359 			BO_UNLOCK(bo);
3360 			bp->b_flags |= B_INVAL;
3361 			brelse(bp);
3362 			goto loop;
3363 		}
3364
3365 		/*
3366 		 * Insert the buffer into the hash, so that it can
3367 		 * be found by incore.
3368 		 */
3369 		bp->b_blkno = bp->b_lblkno = blkno;
3370 		bp->b_offset = offset;
3371 		bgetvp(vp, bp);
3372 		BO_UNLOCK(bo);
3373
3374 		/*
3375 		 * Set the B_VMIO bit and allocbuf() the buffer bigger.  Since the
3376 		 * buffer size starts out as 0, B_CACHE will be set by
3377 		 * allocbuf() for the VMIO case prior to it testing the
3378 		 * backing store for validity.
3379 		 */
3380
3381 		if (vmio) {
3382 			bp->b_flags |= B_VMIO;
3383 			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3384 			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
3385 			    bp, vp->v_object, bp->b_bufobj->bo_object));
3386 		} else {
3387 			bp->b_flags &= ~B_VMIO;
3388 			KASSERT(bp->b_bufobj->bo_object == NULL,
3389 			    ("ARGH! has b_bufobj->bo_object %p %p\n",
3390 			    bp, bp->b_bufobj->bo_object));
3391 			BUF_CHECK_MAPPED(bp);
3392 		}
3393
3394 		allocbuf(bp, size);
3395 		bp->b_flags &= ~B_DONE;
3396 	}
3397 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3398 	BUF_ASSERT_HELD(bp);
3399 end:
3400 	KASSERT(bp->b_bufobj == bo,
3401 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3402 	return (bp);
3403 }
3404
3405 /*
3406  * Get an empty, disassociated buffer of given size.  The buffer is initially
3407  * set to B_INVAL.
3408  */
3409 struct buf *
3410 geteblk(int size, int flags)
3411 {
3412 	struct buf *bp;
3413 	int maxsize;
3414
3415 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
3416 	while ((bp = getnewbuf(NULL, 0, 0, size, maxsize, flags)) == NULL) {
3417 		if ((flags & GB_NOWAIT_BD) &&
3418 		    (curthread->td_pflags & TDP_BUFNEED) != 0)
3419 			return (NULL);
3420 	}
3421 	allocbuf(bp, size);
3422 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
3423 	BUF_ASSERT_HELD(bp);
3424 	return (bp);
3425 }
3426
3427 /*
3428  * This code constitutes the buffer memory from either anonymous system
3429  * memory (in the case of non-VMIO operations) or from an associated
3430  * VM object (in the case of VMIO operations).  This code is able to
3431  * resize a buffer up or down.
3432  *
3433  * Note that this code is tricky, and has many complications to resolve
3434  * deadlock or inconsistent data situations.  Tread lightly!!!
3435  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
3436  * the caller.  Calling this code willy-nilly can result in the loss of data.
3437  *
3438  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
3439  * B_CACHE for the non-VMIO case.
3440  */
3441
3442 int
3443 allocbuf(struct buf *bp, int size)
3444 {
3445 	int newbsize, mbsize;
3446 	int i;
3447
3448 	BUF_ASSERT_HELD(bp);
3449
3450 	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
3451 		panic("allocbuf: buffer too small");
3452
3453 	if ((bp->b_flags & B_VMIO) == 0) {
3454 		caddr_t origbuf;
3455 		int origbufsize;
3456 		/*
3457 		 * Just get anonymous memory from the kernel.  Don't
3458 		 * mess with B_CACHE.
3459 		 */
3460 		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3461 		if (bp->b_flags & B_MALLOC)
3462 			newbsize = mbsize;
3463 		else
3464 			newbsize = round_page(size);
3465
3466 		if (newbsize < bp->b_bufsize) {
3467 			/*
3468 			 * Malloced buffers are not shrunk.
3469 			 */
3470 			if (bp->b_flags & B_MALLOC) {
3471 				if (newbsize) {
3472 					bp->b_bcount = size;
3473 				} else {
3474 					free(bp->b_data, M_BIOBUF);
3475 					bufmallocadjust(bp, 0);
3476 					bp->b_data = bp->b_kvabase;
3477 					bp->b_bcount = 0;
3478 					bp->b_flags &= ~B_MALLOC;
3479 				}
3480 				return 1;
3481 			}
3482 			vm_hold_free_pages(bp, newbsize);
3483 		} else if (newbsize > bp->b_bufsize) {
3484 			/*
3485 			 * We only use malloced memory on the first allocation,
3486 			 * and revert to page-allocated memory when the buffer
3487 			 * grows.
3488 			 */
3489 			/*
3490 			 * There is a potential smp race here that could lead
3491 			 * to bufmallocspace slightly passing the max.  It
3492 			 * is probably extremely rare and not worth worrying
3493 			 * over.
3494 			 */
3495 			if ((bufmallocspace < maxbufmallocspace) &&
3496 			    (bp->b_bufsize == 0) &&
3497 			    (mbsize <= PAGE_SIZE/2)) {

3498 				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
3499 				bp->b_bcount = size;
3500 				bp->b_flags |= B_MALLOC;
3501 				bufmallocadjust(bp, mbsize);
3502 				return 1;
3503 			}
3504 			origbuf = NULL;
3505 			origbufsize = 0;
3506 			/*
3507 			 * If the buffer is growing on its other-than-first
3508 			 * allocation then we revert to the page-allocation
3509 			 * scheme.
3510 			 */
3511 			if (bp->b_flags & B_MALLOC) {
3512 				origbuf = bp->b_data;
3513 				origbufsize = bp->b_bufsize;
3514 				bp->b_data = bp->b_kvabase;
3515 				bufmallocadjust(bp, 0);
3516 				bp->b_flags &= ~B_MALLOC;
3517 				newbsize = round_page(newbsize);
3518 			}
3519 			vm_hold_load_pages(
3520 			    bp,
3521 			    (vm_offset_t) bp->b_data + bp->b_bufsize,
3522 			    (vm_offset_t) bp->b_data + newbsize);
3523 			if (origbuf) {
3524 				bcopy(origbuf, bp->b_data, origbufsize);
3525 				free(origbuf, M_BIOBUF);
3526 			}
3527 		}
3528 	} else {
3529 		int desiredpages;
3530
3531 		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3532 		desiredpages = (size == 0) ? 0 :
3533 		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
3534
3535 		if (bp->b_flags & B_MALLOC)
3536 			panic("allocbuf: VMIO buffer can't be malloced");
3537 		/*
3538 		 * Set B_CACHE initially if buffer is 0 length or will become
3539 		 * 0-length.
3540 		 */
3541 		if (size == 0 || bp->b_bufsize == 0)
3542 			bp->b_flags |= B_CACHE;
3543
3544 		if (newbsize < bp->b_bufsize) {
3545 			/*
3546 			 * The DEV_BSIZE aligned new buffer size is less than
3547 			 * the DEV_BSIZE aligned existing buffer size.  Figure
3548 			 * out if we have to remove any pages.
3549 			 */
3550 			if (desiredpages < bp->b_npages) {
3551 				vm_page_t m;

3552 				if (buf_mapped(bp)) {
3553 					BUF_CHECK_MAPPED(bp);
3554 					pmap_qremove((vm_offset_t)trunc_page(
3555 					    (vm_offset_t)bp->b_data) +
3556 					    (desiredpages << PAGE_SHIFT),
3557 					    (bp->b_npages - desiredpages));
3558 				} else
3559 					BUF_CHECK_UNMAPPED(bp);
3560 				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
3561 				for (i = desiredpages; i < bp->b_npages; i++) {
3562 					/*
3563 					 * The page is not freed here -- it
3564 					 * is the responsibility of
3565 					 * vnode_pager_setsize.
3566 					 */
3567 					m = bp->b_pages[i];
3568 					KASSERT(m != bogus_page,
3569 					    ("allocbuf: bogus page found"));
3570 					while (vm_page_sleep_if_busy(m,
3571 					    "biodep"))
3572 						continue;

3573 					bp->b_pages[i] = NULL;
3574 					vm_page_lock(m);
3575 					vm_page_unwire(m, PQ_INACTIVE);
3576 					vm_page_unlock(m);
3577 				}
3578 				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
3579 				bp->b_npages = desiredpages;
3580 			}
3581 		} else if (size > bp->b_bcount) {
3582 			/*
3583 			 * We are growing the buffer, possibly in a
3584 			 * byte-granular fashion.
3585 			 */
3586 			vm_object_t obj;
3587 			vm_offset_t toff;
3588 			vm_offset_t tinc;

3589 			/*
3590 			 * Step 1, bring in the VM pages from the object,
3591 			 * allocating them if necessary.  We must clear
3592 			 * B_CACHE if these pages are not valid for the
3593 			 * range covered by the buffer.
3594 			 */

3595 			obj = bp->b_bufobj->bo_object;

3596 			VM_OBJECT_WLOCK(obj);
3597 			while (bp->b_npages < desiredpages) {
3598 				vm_page_t m;

3599 				/*
3600 				 * We must allocate system pages since blocking
3601 				 * here could interfere with paging I/O, no
3602 				 * matter which process we are.
3603 				 *
3604 				 * Only exclusive busy can be tested here.
3605 				 * Blocking on shared busy might lead to
3606 				 * deadlocks once allocbuf() is called after
3607 				 * pages are vfs_busy_pages().
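				 *
				 * A note on the flags used below (a hedged
				 * reading): VM_ALLOC_NOBUSY together with
				 * VM_ALLOC_IGN_SBUSY grabs the page without
				 * busying it while tolerating a shared-busy
				 * state, VM_ALLOC_WIRED wires it, and
				 * VM_ALLOC_COUNT() passes a hint of how many
				 * further pages this loop still intends to
				 * grab.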
3614 				 */
3615 				m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) +
3616 				    bp->b_npages, VM_ALLOC_NOBUSY |
3617 				    VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
3618 				    VM_ALLOC_IGN_SBUSY |
3619 				    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
3620 				if (m->valid == 0)
3621 					bp->b_flags &= ~B_CACHE;
3622 				bp->b_pages[bp->b_npages] = m;
3623 				++bp->b_npages;
3624 			}
3625
3626 			/*
3627 			 * Step 2.  We've loaded the pages into the buffer,
3628 			 * we have to figure out if we can still have B_CACHE
3629 			 * set.  Note that B_CACHE is set according to the
3630 			 * byte-granular range ( bcount and size ), not the
3631 			 * aligned range ( newbsize ).
3632 			 *
3633 			 * The VM test is against m->valid, which is DEV_BSIZE
3634 			 * aligned.  Needless to say, the validity of the data
3635 			 * needs to also be DEV_BSIZE aligned.  Note that this
3636 			 * fails with NFS if the server or some other client
3637 			 * extends the file's EOF.  If our buffer is resized,
3638 			 * B_CACHE may remain set! XXX
3639 			 */
3640
3641 			toff = bp->b_bcount;
3642 			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3643
3644 			while ((bp->b_flags & B_CACHE) && toff < size) {
3645 				vm_pindex_t pi;
3646
3647 				if (tinc > (size - toff))
3648 					tinc = size - toff;
3649
3650 				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
3651 				    PAGE_SHIFT;
3652
3653 				vfs_buf_test_cache(
3654 				    bp,
3655 				    bp->b_offset,
3656 				    toff,
3657 				    tinc,
3658 				    bp->b_pages[pi]
3659 				);
3660 				toff += tinc;
3661 				tinc = PAGE_SIZE;
3662 			}
3663 			VM_OBJECT_WUNLOCK(obj);
3664
3665 			/*
3666 			 * Step 3, fixup the KVA pmap.
3667 			 */
3668 			if (buf_mapped(bp))
3669 				bpmap_qenter(bp);
3670 			else
3671 				BUF_CHECK_UNMAPPED(bp);
3672 		}
3673 	}
3674 	/* Record changes in allocation size. */
3675 	if (bp->b_bufsize != newbsize)
3676 		bufspaceadjust(bp, newbsize);
3677 	bp->b_bcount = size;		/* requested buffer size. */
3678 	return 1;
3679 }
3680
3681 extern int inflight_transient_maps;
3682
3683 void
3684 biodone(struct bio *bp)
3685 {
3686 	struct mtx *mtxp;
3687 	void (*done)(struct bio *);
3688 	vm_offset_t start, end;
3689
3690 	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
3691 		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
3692 		bp->bio_flags |= BIO_UNMAPPED;
3693 		start = trunc_page((vm_offset_t)bp->bio_data);
3694 		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
3695 		bp->bio_data = unmapped_buf;
3696 		pmap_qremove(start, OFF_TO_IDX(end - start));
3697 		vmem_free(transient_arena, start, end - start);
3698 		atomic_add_int(&inflight_transient_maps, -1);
3699 	}
3700 	done = bp->bio_done;
3701 	if (done == NULL) {
3702 		mtxp = mtx_pool_find(mtxpool_sleep, bp);
3703 		mtx_lock(mtxp);
3704 		bp->bio_flags |= BIO_DONE;
3705 		wakeup(bp);
3706 		mtx_unlock(mtxp);
3707 	} else {
3708 		bp->bio_flags |= BIO_DONE;
3709 		done(bp);
3710 	}
3711 }
3712
3713 /*
3714  * Wait for a BIO to finish.
3715  */
3716 int
3717 biowait(struct bio *bp, const char *wchan)
3718 {
3719 	struct mtx *mtxp;
3720
3721 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3722 	mtx_lock(mtxp);
3723 	while ((bp->bio_flags & BIO_DONE) == 0)
3724 		msleep(bp, mtxp, PRIBIO, wchan, 0);
3725 	mtx_unlock(mtxp);
3726 	if (bp->bio_error != 0)
3727 		return (bp->bio_error);
3728 	if (!(bp->bio_flags & BIO_ERROR))
3729 		return (0);
3730 	return (EIO);
3731 }
3732
3733 void
3734 biofinish(struct bio *bp, struct devstat *stat, int error)
3735 {
3736
3737 	if (error) {
3738 		bp->bio_error = error;
3739 		bp->bio_flags |= BIO_ERROR;
3740 	}
3741 	if (stat != NULL)
3742 		devstat_end_transaction_bio(stat, bp);
3743 	biodone(bp);
3744 }
3745
3746 /*
3747  * bufwait:
3748  *
3749  *	Wait for buffer I/O completion, returning error status.  The buffer
3750  *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3751  *	error and cleared.
3752  */
3753 int
3754 bufwait(struct buf *bp)
3755 {
3756 	if (bp->b_iocmd == BIO_READ)
3757 		bwait(bp, PRIBIO, "biord");
3758 	else
3759 		bwait(bp, PRIBIO, "biowr");
3760 	if (bp->b_flags & B_EINTR) {
3761 		bp->b_flags &= ~B_EINTR;
3762 		return (EINTR);
3763 	}
3764 	if (bp->b_ioflags & BIO_ERROR) {
3765 		return (bp->b_error ? bp->b_error : EIO);
3766 	} else {
3767 		return (0);
3768 	}
3769 }
3770
3771 /*
3772  * Call back function from struct bio back up to struct buf.
3773  */
3774 static void
3775 bufdonebio(struct bio *bip)
3776 {
3777 	struct buf *bp;
3778
3779 	bp = bip->bio_caller2;
3780 	bp->b_resid = bip->bio_resid;
3781 	bp->b_ioflags = bip->bio_flags;
3782 	bp->b_error = bip->bio_error;
3783 	if (bp->b_error)
3784 		bp->b_ioflags |= BIO_ERROR;
3785 	bufdone(bp);
3786 	g_destroy_bio(bip);
3787 }
3788
3789 void
3790 dev_strategy(struct cdev *dev, struct buf *bp)
3791 {
3792 	struct cdevsw *csw;
3793 	int ref;
3794
3795 	KASSERT(dev->si_refcount > 0,
3796 	    ("dev_strategy on un-referenced struct cdev *(%s) %p",
3797 	    devtoname(dev), dev));
3798
3799 	csw = dev_refthread(dev, &ref);
3800 	dev_strategy_csw(dev, csw, bp);
3801 	dev_relthread(dev, ref);
3802 }
3803
3804 void
3805 dev_strategy_csw(struct cdev *dev, struct cdevsw *csw, struct buf *bp)
3806 {
3807 	struct bio *bip;
3808
3809 	KASSERT(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE,
3810 	    ("b_iocmd botch"));
3811 	KASSERT(((dev->si_flags & SI_ETERNAL) != 0 && csw != NULL) ||
3812 	    dev->si_threadcount > 0,
3813 	    ("dev_strategy_csw threadcount cdev *(%s) %p", devtoname(dev),
3814 	    dev));
3815 	if (csw == NULL) {
3816 		bp->b_error = ENXIO;
3817 		bp->b_ioflags = BIO_ERROR;
3818 		bufdone(bp);
3819 		return;
3820 	}
3821 	for (;;) {
3822 		bip = g_new_bio();
3823 		if (bip != NULL)
3824 			break;
3825 		/* Try again later */
3826 		tsleep(&bp, PRIBIO, "dev_strat", hz/10);
3827 	}
3828 	bip->bio_cmd = bp->b_iocmd;
3829 	bip->bio_offset = bp->b_iooffset;
3830 	bip->bio_length = bp->b_bcount;
3831 	bip->bio_bcount = bp->b_bcount;	/* XXX: remove */
3832 	bdata2bio(bp, bip);
3833 	bip->bio_done = bufdonebio;
3834 	bip->bio_caller2 = bp;
3835 	bip->bio_dev = dev;
3836 	(*csw->d_strategy)(bip);
3837 }
3838
3839 /*
3840  * bufdone:
3841  *
3842  *	Finish I/O on a buffer, optionally calling a completion function.
3843  *	This is usually called from an interrupt so process blocking is
3844  *	not allowed.
3845  *
3846  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
3847  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3848  *	assuming B_INVAL is clear.
3849  *
3850  *	For the VMIO case, we set B_CACHE if the op was a read and no
3851  *	read error occurred, or if the op was a write.  B_CACHE is never
3852  *	set if the buffer is invalid or otherwise uncacheable.
3853  *
3854  *	biodone does not mess with B_INVAL, allowing the I/O routine or the
3855  *	initiator to leave B_INVAL set to brelse the buffer out of existence
3856  *	in the biodone routine.
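 *
 *	An illustrative sketch of an async initiator that relies on this
 *	("my_iodone" is a hypothetical completion handler invoked from
 *	bufdone()):
 *
 *		bp->b_iodone = my_iodone;
 *		bp->b_flags |= B_ASYNC;
 *		bstrategy(bp);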
3857  */
3858 void
3859 bufdone(struct buf *bp)
3860 {
3861 	struct bufobj *dropobj;
3862 	void (*biodone)(struct buf *);
3863
3864 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
3865 	dropobj = NULL;
3866
3867 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
3868 	BUF_ASSERT_HELD(bp);
3869
3870 	runningbufwakeup(bp);
3871 	if (bp->b_iocmd == BIO_WRITE)
3872 		dropobj = bp->b_bufobj;
3873 	/* call optional completion function if requested */
3874 	if (bp->b_iodone != NULL) {
3875 		biodone = bp->b_iodone;
3876 		bp->b_iodone = NULL;
3877 		(*biodone) (bp);
3878 		if (dropobj)
3879 			bufobj_wdrop(dropobj);
3880 		return;
3881 	}
3882
3883 	bufdone_finish(bp);
3884
3885 	if (dropobj)
3886 		bufobj_wdrop(dropobj);
3887 }
3888
3889 void
3890 bufdone_finish(struct buf *bp)
3891 {
3892 	BUF_ASSERT_HELD(bp);
3893
3894 	if (!LIST_EMPTY(&bp->b_dep))
3895 		buf_complete(bp);
3896
3897 	if (bp->b_flags & B_VMIO) {
3898 		vm_ooffset_t foff;
3899 		vm_page_t m;
3900 		vm_object_t obj;
3901 		struct vnode *vp;
3902 		int bogus, i, iosize;
3903
3904 		obj = bp->b_bufobj->bo_object;
3905 		KASSERT(obj->paging_in_progress >= bp->b_npages,
3906 		    ("biodone_finish: paging in progress(%d) < b_npages(%d)",
3907 		    obj->paging_in_progress, bp->b_npages));
3908
3909 		vp = bp->b_vp;
3910 		KASSERT(vp->v_holdcnt > 0,
3911 		    ("biodone_finish: vnode %p has zero hold count", vp));
3912 		KASSERT(vp->v_object != NULL,
3913 		    ("biodone_finish: vnode %p has no vm_object", vp));
3914
3915 		foff = bp->b_offset;
3916 		KASSERT(bp->b_offset != NOOFFSET,
3917 		    ("biodone_finish: bp %p has no buffer offset", bp));
3918
3919 		/*
3920 		 * Set B_CACHE if the op was a normal read and no error
3921 		 * occurred.  B_CACHE is set for writes in the b*write()
3922 		 * routines.
3923 		 */
3924 		iosize = bp->b_bcount - bp->b_resid;
3925 		if (bp->b_iocmd == BIO_READ &&
3926 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
3927 		    !(bp->b_ioflags & BIO_ERROR)) {
3928 			bp->b_flags |= B_CACHE;
3929 		}
3930 		bogus = 0;
3931 		VM_OBJECT_WLOCK(obj);
3932 		for (i = 0; i < bp->b_npages; i++) {
3933 			int bogusflag = 0;
3934 			int resid;
3935
3936 			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
3937 			if (resid > iosize)
3938 				resid = iosize;
3939
3940 			/*
3941 			 * Clean up bogus pages, restoring the originals.
3942 			 */
3943 			m = bp->b_pages[i];
3944 			if (m == bogus_page) {
3945 				bogus = bogusflag = 1;
3946 				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
3947 				if (m == NULL)
3948 					panic("biodone: page disappeared!");
3949 				bp->b_pages[i] = m;
3950 			}
3951 			KASSERT(OFF_TO_IDX(foff) == m->pindex,
3952 			    ("biodone_finish: foff(%jd)/pindex(%ju) mismatch",
3953 			    (intmax_t)foff, (uintmax_t)m->pindex));
3954
3955 			/*
3956 			 * In the write case, the valid and clean bits are
3957 			 * already changed correctly ( see bdwrite() ), so we
3958 			 * only need to do this here in the read case.
3959 			 */
3960 			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
3961 				KASSERT((m->dirty & vm_page_bits(foff &
3962 				    PAGE_MASK, resid)) == 0, ("bufdone_finish:"
3963 				    " page %p has unexpected dirty bits", m));
3964 				vfs_page_set_valid(bp, foff, m);
3965 			}
3966
3967 			vm_page_sunbusy(m);
3968 			vm_object_pip_subtract(obj, 1);
3969 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3970 			iosize -= resid;
3971 		}
3972 		vm_object_pip_wakeupn(obj, 0);
3973 		VM_OBJECT_WUNLOCK(obj);
3974 		if (bogus && buf_mapped(bp)) {
3975 			BUF_CHECK_MAPPED(bp);
3976 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3977 			    bp->b_pages, bp->b_npages);
3978 		}
3979 	}
3980
3981 	/*
3982 	 * For asynchronous completions, release the buffer now.  The brelse
3983 	 * will do a wakeup there if necessary - so no need to do a wakeup
3984 	 * here in the async case.  The sync case always needs to do a wakeup.
3985 	 */
3986 
3987 	if (bp->b_flags & B_ASYNC) {
3988 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
3989 			brelse(bp);
3990 		else
3991 			bqrelse(bp);
3992 	} else
3993 		bdone(bp);
3994 }
3995 
3996 /*
3997  * This routine is called in lieu of iodone in the case of
3998  * incomplete I/O.  This keeps the busy status for pages
3999  * consistent.
4000  */
4001 void
4002 vfs_unbusy_pages(struct buf *bp)
4003 {
4004 	int i;
4005 	vm_object_t obj;
4006 	vm_page_t m;
4007 
4008 	runningbufwakeup(bp);
4009 	if (!(bp->b_flags & B_VMIO))
4010 		return;
4011 
4012 	obj = bp->b_bufobj->bo_object;
4013 	VM_OBJECT_WLOCK(obj);
4014 	for (i = 0; i < bp->b_npages; i++) {
4015 		m = bp->b_pages[i];
4016 		if (m == bogus_page) {
4017 			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4018 			if (!m)
4019 				panic("vfs_unbusy_pages: page missing\n");
4020 			bp->b_pages[i] = m;
4021 			if (buf_mapped(bp)) {
4022 				BUF_CHECK_MAPPED(bp);
4023 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4024 				    bp->b_pages, bp->b_npages);
4025 			} else
4026 				BUF_CHECK_UNMAPPED(bp);
4027 		}
4028 		vm_object_pip_subtract(obj, 1);
4029 		vm_page_sunbusy(m);
4030 	}
4031 	vm_object_pip_wakeupn(obj, 0);
4032 	VM_OBJECT_WUNLOCK(obj);
4033 }
4034 
4035 /*
4036  * vfs_page_set_valid:
4037  *
4038  *	Set the valid bits in a page based on the supplied offset.  The
4039  *	range is restricted to the buffer's size.
4040  *
4041  *	This routine is typically called after a read completes.
4042  */
4043 static void
4044 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4045 {
4046 	vm_ooffset_t eoff;
4047 
4048 	/*
4049 	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4050 	 * page boundary and eoff is not greater than the end of the buffer.
4051 	 * The end of the buffer, in this case, is our file EOF, not the
4052 	 * allocation size of the buffer.
4053 	 */
4054 	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4055 	if (eoff > bp->b_offset + bp->b_bcount)
4056 		eoff = bp->b_offset + bp->b_bcount;
4057 
4058 	/*
4059 	 * Set valid range.  This is typically the entire buffer and thus the
4060 	 * entire page.
4061 	 */
4062 	if (eoff > off)
4063 		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4064 }
4065 
4066 /*
4067  * vfs_page_set_validclean:
4068  *
4069  *	Set the valid bits and clear the dirty bits in a page based on the
4070  *	supplied offset.  The range is restricted to the buffer's size.
4071  */
4072 static void
4073 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4074 {
4075 	vm_ooffset_t soff, eoff;
4076 
4077 	/*
4078 	 * Start and end offsets in buffer.  eoff - soff may not cross a
4079 	 * page boundary or cross the end of the buffer.  The end of the
4080 	 * buffer, in this case, is our file EOF, not the allocation size
4081 	 * of the buffer.
4082 	 */
4083 	soff = off;
4084 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4085 	if (eoff > bp->b_offset + bp->b_bcount)
4086 		eoff = bp->b_offset + bp->b_bcount;
4087 
4088 	/*
4089 	 * Set valid range.  This is typically the entire buffer and thus the
4090 	 * entire page.
4091 	 */
4092 	if (eoff > soff) {
4093 		vm_page_set_validclean(
4094 		    m,
4095 		    (vm_offset_t) (soff & PAGE_MASK),
4096 		    (vm_offset_t) (eoff - soff)
4097 		);
4098 	}
4099 }
4100 
4101 /*
4102  * Ensure that no buffer page is exclusively busied.  If any page is
4103  * exclusively busied, drain it.
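 *
 * Illustrative call pattern (a sketch; vfs_busy_pages() below is the
 * in-tree caller): the object must be write-locked, and on return no
 * page in bp->b_pages is exclusively busied, so the caller may
 * share-busy the pages without risking a deadlock:
 *
 *	VM_OBJECT_WLOCK(obj);
 *	vfs_drain_busy_pages(bp);
 *	... vm_page_sbusy() each page and start the I/O ...
 *	VM_OBJECT_WUNLOCK(obj);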
4104  */
4105 void
4106 vfs_drain_busy_pages(struct buf *bp)
4107 {
4108 	vm_page_t m;
4109 	int i, last_busied;
4110 
4111 	VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
4112 	last_busied = 0;
4113 	for (i = 0; i < bp->b_npages; i++) {
4114 		m = bp->b_pages[i];
4115 		if (vm_page_xbusied(m)) {
4116 			for (; last_busied < i; last_busied++)
4117 				vm_page_sbusy(bp->b_pages[last_busied]);
4118 			while (vm_page_xbusied(m)) {
4119 				vm_page_lock(m);
4120 				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4121 				vm_page_busy_sleep(m, "vbpage");
4122 				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4123 			}
4124 		}
4125 	}
4126 	for (i = 0; i < last_busied; i++)
4127 		vm_page_sunbusy(bp->b_pages[i]);
4128 }
4129 
4130 /*
4131  * This routine is called before a device strategy routine.
4132  * It is used to tell the VM system that paging I/O is in
4133  * progress, and treat the pages associated with the buffer
4134  * almost as being exclusively busied.  Also the object paging_in_progress
4135  * flag is handled to make sure that the object doesn't become
4136  * inconsistent.
4137  *
4138  * Since I/O has not been initiated yet, certain buffer flags
4139  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4140  * and should be ignored.
4141  */
4142 void
4143 vfs_busy_pages(struct buf *bp, int clear_modify)
4144 {
4145 	int i, bogus;
4146 	vm_object_t obj;
4147 	vm_ooffset_t foff;
4148 	vm_page_t m;
4149 
4150 	if (!(bp->b_flags & B_VMIO))
4151 		return;
4152 
4153 	obj = bp->b_bufobj->bo_object;
4154 	foff = bp->b_offset;
4155 	KASSERT(bp->b_offset != NOOFFSET,
4156 	    ("vfs_busy_pages: no buffer offset"));
4157 	VM_OBJECT_WLOCK(obj);
4158 	vfs_drain_busy_pages(bp);
4159 	if (bp->b_bufsize != 0)
4160 		vfs_setdirty_locked_object(bp);
4161 	bogus = 0;
4162 	for (i = 0; i < bp->b_npages; i++) {
4163 		m = bp->b_pages[i];
4164 
4165 		if ((bp->b_flags & B_CLUSTER) == 0) {
4166 			vm_object_pip_add(obj, 1);
4167 			vm_page_sbusy(m);
4168 		}
4169 		/*
4170 		 * When readying a buffer for a read ( i.e.,
4171 		 * clear_modify == 0 ), it is important to do
4172 		 * bogus_page replacement for valid pages in
4173 		 * partially instantiated buffers.  Partially
4174 		 * instantiated buffers can, in turn, occur when
4175 		 * reconstituting a buffer from its VM backing store
4176 		 * base.  We only have to do this if B_CACHE is
4177 		 * clear ( which causes the I/O to occur in the
4178 		 * first place ).  The replacement prevents the read
4179 		 * I/O from overwriting potentially dirty VM-backed
4180 		 * pages.  XXX bogus page replacement is, uh, bogus.
4181 		 * It may not work properly with small-block devices.
4182 		 * We need to find a better way.
4183 		 */
4184 		if (clear_modify) {
4185 			pmap_remove_write(m);
4186 			vfs_page_set_validclean(bp, foff, m);
4187 		} else if (m->valid == VM_PAGE_BITS_ALL &&
4188 		    (bp->b_flags & B_CACHE) == 0) {
4189 			bp->b_pages[i] = bogus_page;
4190 			bogus++;
4191 		}
4192 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4193 	}
4194 	VM_OBJECT_WUNLOCK(obj);
4195 	if (bogus && buf_mapped(bp)) {
4196 		BUF_CHECK_MAPPED(bp);
4197 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4198 		    bp->b_pages, bp->b_npages);
4199 	}
4200 }
4201 
4202 /*
4203  * vfs_bio_set_valid:
4204  *
4205  *	Set the range within the buffer to valid.  The range is
4206  *	relative to the beginning of the buffer, b_offset.  Note that
4207  *	b_offset itself may be offset from the beginning of the first
4208  *	page.
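 *
 *	Worked example (illustrative numbers, PAGE_SIZE = 4096): for a
 *	buffer with (b_offset & PAGE_MASK) == 512, a call with base = 1024
 *	adjusts base to 1536, so at most n = 4096 - 1536 = 2560 bytes can
 *	be validated in page 0 before the loop below advances to the next
 *	page with a full n = PAGE_SIZE.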
4209  */
4210 void
4211 vfs_bio_set_valid(struct buf *bp, int base, int size)
4212 {
4213 	int i, n;
4214 	vm_page_t m;
4215 
4216 	if (!(bp->b_flags & B_VMIO))
4217 		return;
4218 
4219 	/*
4220 	 * Fixup base to be relative to beginning of first page.
4221 	 * Set initial n to be the maximum number of bytes in the
4222 	 * first page that can be validated.
4223 	 */
4224 	base += (bp->b_offset & PAGE_MASK);
4225 	n = PAGE_SIZE - (base & PAGE_MASK);
4226 
4227 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4228 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4229 		m = bp->b_pages[i];
4230 		if (n > size)
4231 			n = size;
4232 		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4233 		base += n;
4234 		size -= n;
4235 		n = PAGE_SIZE;
4236 	}
4237 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4238 }
4239 
4240 /*
4241  * vfs_bio_clrbuf:
4242  *
4243  *	If the specified buffer is a non-VMIO buffer, clear the entire
4244  *	buffer.  If the specified buffer is a VMIO buffer, clear and
4245  *	validate only the previously invalid portions of the buffer.
4246  *	This routine essentially fakes an I/O, so we need to clear
4247  *	BIO_ERROR and B_INVAL.
4248  *
4249  *	Note that while we only theoretically need to clear through b_bcount,
4250  *	we go ahead and clear through b_bufsize.
4251  */
4252 void
4253 vfs_bio_clrbuf(struct buf *bp)
4254 {
4255 	int i, j, mask, sa, ea, slide;
4256 
4257 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4258 		clrbuf(bp);
4259 		return;
4260 	}
4261 	bp->b_flags &= ~B_INVAL;
4262 	bp->b_ioflags &= ~BIO_ERROR;
4263 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4264 	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4265 	    (bp->b_offset & PAGE_MASK) == 0) {
4266 		if (bp->b_pages[0] == bogus_page)
4267 			goto unlock;
4268 		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4269 		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4270 		if ((bp->b_pages[0]->valid & mask) == mask)
4271 			goto unlock;
4272 		if ((bp->b_pages[0]->valid & mask) == 0) {
4273 			pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4274 			bp->b_pages[0]->valid |= mask;
4275 			goto unlock;
4276 		}
4277 	}
4278 	sa = bp->b_offset & PAGE_MASK;
4279 	slide = 0;
4280 	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4281 		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4282 		ea = slide & PAGE_MASK;
4283 		if (ea == 0)
4284 			ea = PAGE_SIZE;
4285 		if (bp->b_pages[i] == bogus_page)
4286 			continue;
4287 		j = sa / DEV_BSIZE;
4288 		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4289 		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4290 		if ((bp->b_pages[i]->valid & mask) == mask)
4291 			continue;
4292 		if ((bp->b_pages[i]->valid & mask) == 0)
4293 			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4294 		else {
4295 			for (; sa < ea; sa += DEV_BSIZE, j++) {
4296 				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4297 					pmap_zero_page_area(bp->b_pages[i],
4298 					    sa, DEV_BSIZE);
4299 				}
4300 			}
4301 		}
4302 		bp->b_pages[i]->valid |= mask;
4303 	}
4304 unlock:
4305 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4306 	bp->b_resid = 0;
4307 }
4308 
4309 void
4310 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4311 {
4312 	vm_page_t m;
4313 	int i, n;
4314 
4315 	if (buf_mapped(bp)) {
4316 		BUF_CHECK_MAPPED(bp);
4317 		bzero(bp->b_data + base, size);
4318 	} else {
4319 		BUF_CHECK_UNMAPPED(bp);
4320 		n = PAGE_SIZE - (base & PAGE_MASK);
4321 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4322 			m = bp->b_pages[i];
4323 			if (n > size)
4324 				n = size;
4325 			pmap_zero_page_area(m, base & PAGE_MASK, n);
4326 			base += n;
4327 			size -= n;
4328 			n = PAGE_SIZE;
4329 		}
4330 	}
4331 }
4332 
4333 /*
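 * (Usage sketch, illustrative only: allocbuf() relies on these for
 * non-VMIO buffers, growing the backing store with vm_hold_load_pages()
 * and trimming it with vm_hold_free_pages().)
 *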
4334  * vm_hold_load_pages gets pages into a buffer's address space and
4335  * vm_hold_free_pages releases them.  The pages are anonymous and are
4336  * not associated with a file object.
4337  */
4338 static void
4339 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4340 {
4341 	vm_offset_t pg;
4342 	vm_page_t p;
4343 	int index;
4344 
4345 	BUF_CHECK_MAPPED(bp);
4346 
4347 	to = round_page(to);
4348 	from = round_page(from);
4349 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4350 
4351 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4352 tryagain:
4353 		/*
4354 		 * note: must allocate system pages since blocking here
4355 		 * could interfere with paging I/O, no matter which
4356 		 * process we are.
4357 		 */
4358 		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4359 		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
4360 		if (p == NULL) {
4361 			VM_WAIT;
4362 			goto tryagain;
4363 		}
4364 		pmap_qenter(pg, &p, 1);
4365 		bp->b_pages[index] = p;
4366 	}
4367 	bp->b_npages = index;
4368 }
4369 
4370 /* Return pages associated with this buf to the vm system */
4371 static void
4372 vm_hold_free_pages(struct buf *bp, int newbsize)
4373 {
4374 	vm_offset_t from;
4375 	vm_page_t p;
4376 	int index, newnpages;
4377 
4378 	BUF_CHECK_MAPPED(bp);
4379 
4380 	from = round_page((vm_offset_t)bp->b_data + newbsize);
4381 	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4382 	if (bp->b_npages > newnpages)
4383 		pmap_qremove(from, bp->b_npages - newnpages);
4384 	for (index = newnpages; index < bp->b_npages; index++) {
4385 		p = bp->b_pages[index];
4386 		bp->b_pages[index] = NULL;
4387 		if (vm_page_sbusied(p))
4388 			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
4389 			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
4390 		p->wire_count--;
4391 		vm_page_free(p);
4392 		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
4393 	}
4394 	bp->b_npages = newnpages;
4395 }
4396 
4397 /*
4398  * Map an IO request into kernel virtual address space.
4399  *
4400  * All requests are (re)mapped into kernel VA space.
4401  * Notice that we use b_bufsize for the size of the buffer
4402  * to be mapped.  b_bcount might be modified by the driver.
4403  *
4404  * Note that even if the caller determines that the address space should
4405  * be valid, a race or a smaller file mapped into a larger space may
4406  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4407  * check the return value.
4408  *
4409  * This function only works with pager buffers.
4410  */
4411 int
4412 vmapbuf(struct buf *bp, int mapbuf)
4413 {
4414 	vm_prot_t prot;
4415 	int pidx;
4416 
4417 	if (bp->b_bufsize < 0)
4418 		return (-1);
4419 	prot = VM_PROT_READ;
4420 	if (bp->b_iocmd == BIO_READ)
4421 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
4422 	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4423 	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
4424 	    btoc(MAXPHYS))) < 0)
4425 		return (-1);
4426 	bp->b_npages = pidx;
4427 	bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
4428 	if (mapbuf || !unmapped_buf_allowed) {
4429 		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4430 		bp->b_data = bp->b_kvabase + bp->b_offset;
4431 	} else
4432 		bp->b_data = unmapped_buf;
4433 	return (0);
4434 }
4435 
4436 /*
4437  * Free the io map PTEs associated with this IO operation.
4438  * We also invalidate the TLB entries and reset b_data to unmapped_buf.
4439  *
4440  * This function only works with pager buffers.
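 *
 * Sketch of the expected pairing (illustrative only; physio() is the
 * canonical caller, and "mapped" here is a placeholder for whatever
 * flag the caller computes):
 *
 *	if (vmapbuf(bp, mapped) < 0)
 *		return (EFAULT);
 *	... run the transfer ...
 *	vunmapbuf(bp);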
4441  */
4442 void
4443 vunmapbuf(struct buf *bp)
4444 {
4445 	int npages;
4446 
4447 	npages = bp->b_npages;
4448 	if (buf_mapped(bp))
4449 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4450 	vm_page_unhold_pages(bp->b_pages, npages);
4451 
4452 	bp->b_data = unmapped_buf;
4453 }
4454 
4455 void
4456 bdone(struct buf *bp)
4457 {
4458 	struct mtx *mtxp;
4459 
4460 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4461 	mtx_lock(mtxp);
4462 	bp->b_flags |= B_DONE;
4463 	wakeup(bp);
4464 	mtx_unlock(mtxp);
4465 }
4466 
4467 void
4468 bwait(struct buf *bp, u_char pri, const char *wchan)
4469 {
4470 	struct mtx *mtxp;
4471 
4472 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4473 	mtx_lock(mtxp);
4474 	while ((bp->b_flags & B_DONE) == 0)
4475 		msleep(bp, mtxp, pri, wchan, 0);
4476 	mtx_unlock(mtxp);
4477 }
4478 
4479 int
4480 bufsync(struct bufobj *bo, int waitfor)
4481 {
4482 
4483 	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
4484 }
4485 
4486 void
4487 bufstrategy(struct bufobj *bo, struct buf *bp)
4488 {
4489 	int i = 0;
4490 	struct vnode *vp;
4491 
4492 	vp = bp->b_vp;
4493 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
4494 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
4495 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4496 	i = VOP_STRATEGY(vp, bp);
4497 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4498 }
4499 
4500 void
4501 bufobj_wrefl(struct bufobj *bo)
4502 {
4503 
4504 	KASSERT(bo != NULL, ("NULL bo in bufobj_wrefl"));
4505 	ASSERT_BO_WLOCKED(bo);
4506 	bo->bo_numoutput++;
4507 }
4508 
4509 void
4510 bufobj_wref(struct bufobj *bo)
4511 {
4512 
4513 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4514 	BO_LOCK(bo);
4515 	bo->bo_numoutput++;
4516 	BO_UNLOCK(bo);
4517 }
4518 
4519 void
4520 bufobj_wdrop(struct bufobj *bo)
4521 {
4522 
4523 	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
4524 	BO_LOCK(bo);
4525 	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
4526 	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
4527 		bo->bo_flag &= ~BO_WWAIT;
4528 		wakeup(&bo->bo_numoutput);
4529 	}
4530 	BO_UNLOCK(bo);
4531 }
4532 
4533 int
4534 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4535 {
4536 	int error;
4537 
4538 	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4539 	ASSERT_BO_WLOCKED(bo);
4540 	error = 0;
4541 	while (bo->bo_numoutput) {
4542 		bo->bo_flag |= BO_WWAIT;
4543 		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
4544 		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4545 		if (error)
4546 			break;
4547 	}
4548 	return (error);
4549 }
4550 
4551 void
4552 bpin(struct buf *bp)
4553 {
4554 	struct mtx *mtxp;
4555 
4556 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4557 	mtx_lock(mtxp);
4558 	bp->b_pin_count++;
4559 	mtx_unlock(mtxp);
4560 }
4561 
4562 void
4563 bunpin(struct buf *bp)
4564 {
4565 	struct mtx *mtxp;
4566 
4567 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4568 	mtx_lock(mtxp);
4569 	if (--bp->b_pin_count == 0)
4570 		wakeup(bp);
4571 	mtx_unlock(mtxp);
4572 }
4573 
4574 void
4575 bunpin_wait(struct buf *bp)
4576 {
4577 	struct mtx *mtxp;
4578 
4579 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4580 	mtx_lock(mtxp);
4581 	while (bp->b_pin_count > 0)
4582 		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
4583 	mtx_unlock(mtxp);
4584 }
4585 
4586 /*
4587  * Set bio_data or bio_ma for struct bio from the struct buf.
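 *
 * Sketch (illustrative; dev_strategy_csw() above is an in-tree caller,
 * invoking this after bio_offset and bio_length have been filled in):
 *
 *	bip->bio_offset = bp->b_iooffset;
 *	bip->bio_length = bp->b_bcount;
 *	bdata2bio(bp, bip);
 *
 * Afterwards either bio_data points at mapped buffer memory, or
 * bio_ma/bio_ma_n describe the pages and BIO_UNMAPPED is set.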
4588  */
4589 void
4590 bdata2bio(struct buf *bp, struct bio *bip)
4591 {
4592 
4593 	if (!buf_mapped(bp)) {
4594 		KASSERT(unmapped_buf_allowed, ("unmapped"));
4595 		bip->bio_ma = bp->b_pages;
4596 		bip->bio_ma_n = bp->b_npages;
4597 		bip->bio_data = unmapped_buf;
4598 		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4599 		bip->bio_flags |= BIO_UNMAPPED;
4600 		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
4601 		    PAGE_SIZE == bp->b_npages,
4602 		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
4603 		    (long long)bip->bio_length, bip->bio_ma_n));
4604 	} else {
4605 		bip->bio_data = bp->b_data;
4606 		bip->bio_ma = NULL;
4607 	}
4608 }
4609 
4610 #include "opt_ddb.h"
4611 #ifdef DDB
4612 #include <ddb/ddb.h>
4613 
4614 /* DDB command to show buffer data */
4615 DB_SHOW_COMMAND(buffer, db_show_buffer)
4616 {
4617 	/* get args */
4618 	struct buf *bp = (struct buf *)addr;
4619 
4620 	if (!have_addr) {
4621 		db_printf("usage: show buffer <addr>\n");
4622 		return;
4623 	}
4624 
4625 	db_printf("buf at %p\n", bp);
4626 	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
4627 	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4628 	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4629 	db_printf(
4630 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4631 	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
4632 	    "b_dep = %p\n",
4633 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4634 	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4635 	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4636 	db_printf("b_kvabase = %p, b_kvasize = %d\n",
4637 	    bp->b_kvabase, bp->b_kvasize);
4638 	if (bp->b_npages) {
4639 		int i;
4640 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4641 		for (i = 0; i < bp->b_npages; i++) {
4642 			vm_page_t m;
4643 			m = bp->b_pages[i];
4644 			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
4645 			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
4646 			if ((i + 1) < bp->b_npages)
4647 				db_printf(",");
4648 		}
4649 		db_printf("\n");
4650 	}
4651 	db_printf(" ");
4652 	BUF_LOCKPRINTINFO(bp);
4653 }
4654 
4655 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4656 {
4657 	struct buf *bp;
4658 	int i;
4659 
4660 	for (i = 0; i < nbuf; i++) {
4661 		bp = &buf[i];
4662 		if (BUF_ISLOCKED(bp)) {
4663 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4664 			db_printf("\n");
4665 		}
4666 	}
4667 }
4668 
4669 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4670 {
4671 	struct vnode *vp;
4672 	struct buf *bp;
4673 
4674 	if (!have_addr) {
4675 		db_printf("usage: show vnodebufs <addr>\n");
4676 		return;
4677 	}
4678 	vp = (struct vnode *)addr;
4679 	db_printf("Clean buffers:\n");
4680 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4681 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4682 		db_printf("\n");
4683 	}
4684 	db_printf("Dirty buffers:\n");
4685 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4686 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4687 		db_printf("\n");
4688 	}
4689 }
4690 
4691 DB_COMMAND(countfreebufs, db_countfreebufs)
4692 {
4693 	struct buf *bp;
4694 	int i, used = 0, nfree = 0;
4695 
4696 	if (have_addr) {
4697 		db_printf("usage: countfreebufs\n");
4698 		return;
4699 	}
4700 
4701 	for (i = 0; i < nbuf; i++) {
4702 		bp = &buf[i];
4703 		if ((bp->b_flags & B_INFREECNT) != 0)
4704 			nfree++;
4705 		else
4706 			used++;
4707 	}
4708 
4709 	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
4710 	    nfree + used);
4711 	db_printf("numfreebuffers is %d\n", numfreebuffers);
4712 }
4713 #endif /* DDB */
4714 
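/*
 * Example ddb session using the commands above (the command names come
 * from the DB_SHOW_COMMAND/DB_COMMAND definitions; the addresses are
 * illustrative only):
 *
 *	db> show buffer 0xfffff80012345678
 *	db> show lockedbufs
 *	db> show vnodebufs 0xfffff80011223344
 *	db> countfreebufs
 */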