1 /* 2 * Copyright (c) 1994,1997 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Absolutely no warranty of function or purpose is made by the author 12 * John S. Dyson. 13 * 14 * $FreeBSD$ 15 */ 16 17 /* 18 * this file contains a new buffer I/O scheme implementing a coherent 19 * VM object and buffer cache scheme. Pains have been taken to make 20 * sure that the performance degradation associated with schemes such 21 * as this is not realized. 22 * 23 * Author: John S. Dyson 24 * Significant help during the development and debugging phases 25 * had been provided by David Greenman, also of the FreeBSD core team. 26 * 27 * see man buf(9) for more info. 28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/buf.h> 33 #include <sys/conf.h> 34 #include <sys/eventhandler.h> 35 #include <sys/lock.h> 36 #include <sys/malloc.h> 37 #include <sys/mount.h> 38 #include <sys/kernel.h> 39 #include <sys/kthread.h> 40 #include <sys/proc.h> 41 #include <sys/reboot.h> 42 #include <sys/resourcevar.h> 43 #include <sys/sysctl.h> 44 #include <sys/vmmeter.h> 45 #include <sys/vnode.h> 46 #include <vm/vm.h> 47 #include <vm/vm_param.h> 48 #include <vm/vm_kern.h> 49 #include <vm/vm_pageout.h> 50 #include <vm/vm_page.h> 51 #include <vm/vm_object.h> 52 #include <vm/vm_extern.h> 53 #include <vm/vm_map.h> 54 55 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer"); 56 57 struct bio_ops bioops; /* I/O operation notification */ 58 59 struct buf *buf; /* buffer header pool */ 60 struct swqueue bswlist; 61 62 static void vm_hold_free_pages(struct buf * bp, vm_offset_t from, 63 vm_offset_t to); 64 static void vm_hold_load_pages(struct buf * bp, vm_offset_t from, 65 vm_offset_t to); 66 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, 67 int pageno, vm_page_t m); 68 static void vfs_clean_pages(struct buf * bp); 69 static void vfs_setdirty(struct buf *bp); 70 static void vfs_vmio_release(struct buf *bp); 71 static void vfs_backgroundwritedone(struct buf *bp); 72 static int flushbufqueues(void); 73 74 static int bd_request; 75 76 static void buf_daemon __P((void)); 77 /* 78 * bogus page -- for I/O to/from partially complete buffers 79 * this is a temporary solution to the problem, but it is not 80 * really that bad. it would be better to split the buffer 81 * for input in the case of buffers partially already in memory, 82 * but the code is intricate enough already. 
83 */ 84 vm_page_t bogus_page; 85 int runningbufspace; 86 int vmiodirenable = FALSE; 87 int buf_maxio = DFLTPHYS; 88 static vm_offset_t bogus_offset; 89 90 static int bufspace, maxbufspace, vmiospace, 91 bufmallocspace, maxbufmallocspace, hibufspace; 92 static int maxbdrun; 93 static int needsbuffer; 94 static int numdirtybuffers, hidirtybuffers; 95 static int numfreebuffers, lofreebuffers, hifreebuffers; 96 static int getnewbufcalls; 97 static int getnewbufrestarts; 98 static int kvafreespace; 99 100 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, 101 &numdirtybuffers, 0, ""); 102 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, 103 &hidirtybuffers, 0, ""); 104 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, 105 &numfreebuffers, 0, ""); 106 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, 107 &lofreebuffers, 0, ""); 108 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, 109 &hifreebuffers, 0, ""); 110 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, 111 &runningbufspace, 0, ""); 112 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW, 113 &maxbufspace, 0, ""); 114 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, 115 &hibufspace, 0, ""); 116 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, 117 &bufspace, 0, ""); 118 SYSCTL_INT(_vfs, OID_AUTO, maxbdrun, CTLFLAG_RW, 119 &maxbdrun, 0, ""); 120 SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD, 121 &vmiospace, 0, ""); 122 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, 123 &maxbufmallocspace, 0, ""); 124 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, 125 &bufmallocspace, 0, ""); 126 SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD, 127 &kvafreespace, 0, ""); 128 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, 129 &getnewbufcalls, 0, ""); 130 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, 131 &getnewbufrestarts, 0, ""); 132 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, 133 &vmiodirenable, 0, ""); 134 135 136 static int bufhashmask; 137 static LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash; 138 struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } }; 139 char *buf_wmesg = BUF_WMESG; 140 141 extern int vm_swap_size; 142 143 #define BUF_MAXUSE 24 144 145 #define VFS_BIO_NEED_ANY 0x01 /* any freeable buffer */ 146 #define VFS_BIO_NEED_DIRTYFLUSH 0x02 /* waiting for dirty buffer flush */ 147 #define VFS_BIO_NEED_FREE 0x04 /* wait for free bufs, hi hysteresis */ 148 #define VFS_BIO_NEED_BUFSPACE 0x08 /* wait for buf space, lo hysteresis */ 149 #define VFS_BIO_NEED_KVASPACE 0x10 /* wait for buffer_map space, emerg */ 150 151 /* 152 * Buffer hash table code. Note that the logical block scans linearly, which 153 * gives us some L1 cache locality. 154 */ 155 156 static __inline 157 struct bufhashhdr * 158 bufhash(struct vnode *vnp, daddr_t bn) 159 { 160 return(&bufhashtbl[(((uintptr_t)(vnp) >> 7) + (int)bn) & bufhashmask]); 161 } 162 163 /* 164 * kvaspacewakeup: 165 * 166 * Called when kva space is potential available for recovery or when 167 * kva space is recovered in the buffer_map. This function wakes up 168 * anyone waiting for buffer_map kva space. Even though the buffer_map 169 * is larger then maxbufspace, this situation will typically occur 170 * when the buffer_map gets fragmented. 171 */ 172 173 static __inline void 174 kvaspacewakeup(void) 175 { 176 /* 177 * If someone is waiting for KVA space, wake them up. Even 178 * though we haven't freed the kva space yet, the waiting 179 * process will be able to now. 
180 */ 181 if (needsbuffer & VFS_BIO_NEED_KVASPACE) { 182 needsbuffer &= ~VFS_BIO_NEED_KVASPACE; 183 wakeup(&needsbuffer); 184 } 185 } 186 187 /* 188 * numdirtywakeup: 189 * 190 * If someone is blocked due to there being too many dirty buffers, 191 * and numdirtybuffers is now reasonable, wake them up. 192 */ 193 194 static __inline void 195 numdirtywakeup(void) 196 { 197 if (numdirtybuffers < hidirtybuffers) { 198 if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) { 199 needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH; 200 wakeup(&needsbuffer); 201 } 202 } 203 } 204 205 /* 206 * bufspacewakeup: 207 * 208 * Called when buffer space is potentially available for recovery or when 209 * buffer space is recovered. getnewbuf() will block on this flag when 210 * it is unable to free sufficient buffer space. Buffer space becomes 211 * recoverable when bp's get placed back in the queues. 212 */ 213 214 static __inline void 215 bufspacewakeup(void) 216 { 217 /* 218 * If someone is waiting for BUF space, wake them up. Even 219 * though we haven't freed the kva space yet, the waiting 220 * process will be able to now. 221 */ 222 if (needsbuffer & VFS_BIO_NEED_BUFSPACE) { 223 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE; 224 wakeup(&needsbuffer); 225 } 226 } 227 228 /* 229 * bufcountwakeup: 230 * 231 * Called when a buffer has been added to one of the free queues to 232 * account for the buffer and to wakeup anyone waiting for free buffers. 233 * This typically occurs when large amounts of metadata are being handled 234 * by the buffer cache ( else buffer space runs out first, usually ). 235 */ 236 237 static __inline void 238 bufcountwakeup(void) 239 { 240 ++numfreebuffers; 241 if (needsbuffer) { 242 needsbuffer &= ~VFS_BIO_NEED_ANY; 243 if (numfreebuffers >= hifreebuffers) 244 needsbuffer &= ~VFS_BIO_NEED_FREE; 245 wakeup(&needsbuffer); 246 } 247 } 248 249 /* 250 * vfs_buf_test_cache: 251 * 252 * Called when a buffer is extended. This function clears the B_CACHE 253 * bit if the newly extended portion of the buffer does not contain 254 * valid data. 255 */ 256 static __inline__ 257 void 258 vfs_buf_test_cache(struct buf *bp, 259 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 260 vm_page_t m) 261 { 262 if (bp->b_flags & B_CACHE) { 263 int base = (foff + off) & PAGE_MASK; 264 if (vm_page_is_valid(m, base, size) == 0) 265 bp->b_flags &= ~B_CACHE; 266 } 267 } 268 269 static __inline__ 270 void 271 bd_wakeup(int dirtybuflevel) 272 { 273 if (numdirtybuffers >= dirtybuflevel && bd_request == 0) { 274 bd_request = 1; 275 wakeup(&bd_request); 276 } 277 } 278 279 /* 280 * bd_speedup - speedup the buffer cache flushing code 281 */ 282 283 static __inline__ 284 void 285 bd_speedup(void) 286 { 287 bd_wakeup(1); 288 } 289 290 /* 291 * Initialize buffer headers and related structures. 
292 */ 293 294 caddr_t 295 bufhashinit(caddr_t vaddr) 296 { 297 /* first, make a null hash table */ 298 for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1) 299 ; 300 bufhashtbl = (void *)vaddr; 301 vaddr = vaddr + sizeof(*bufhashtbl) * bufhashmask; 302 --bufhashmask; 303 return(vaddr); 304 } 305 306 void 307 bufinit(void) 308 { 309 struct buf *bp; 310 int i; 311 312 TAILQ_INIT(&bswlist); 313 LIST_INIT(&invalhash); 314 simple_lock_init(&buftimelock); 315 316 for (i = 0; i <= bufhashmask; i++) 317 LIST_INIT(&bufhashtbl[i]); 318 319 /* next, make a null set of free lists */ 320 for (i = 0; i < BUFFER_QUEUES; i++) 321 TAILQ_INIT(&bufqueues[i]); 322 323 /* finally, initialize each buffer header and stick on empty q */ 324 for (i = 0; i < nbuf; i++) { 325 bp = &buf[i]; 326 bzero(bp, sizeof *bp); 327 bp->b_flags = B_INVAL; /* we're just an empty header */ 328 bp->b_dev = NODEV; 329 bp->b_rcred = NOCRED; 330 bp->b_wcred = NOCRED; 331 bp->b_qindex = QUEUE_EMPTY; 332 bp->b_xflags = 0; 333 LIST_INIT(&bp->b_dep); 334 BUF_LOCKINIT(bp); 335 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 336 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 337 } 338 339 /* 340 * maxbufspace is currently calculated to be maximally efficient 341 * when the filesystem block size is DFLTBSIZE or DFLTBSIZE*2 342 * (4K or 8K). To reduce the number of stall points our calculation 343 * is based on DFLTBSIZE which should reduce the chances of actually 344 * running out of buffer headers. The maxbufspace calculation is also 345 * based on DFLTBSIZE (4K) instead of BKVASIZE (8K) in order to 346 * reduce the chance that a KVA allocation will fail due to 347 * fragmentation. While this does not usually create a stall, 348 * the KVA map allocation/free functions are O(N) rather then O(1) 349 * so running them constantly would result in inefficient O(N*M) 350 * buffer cache operation. 351 */ 352 maxbufspace = (nbuf + 8) * DFLTBSIZE; 353 hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10); 354 /* 355 * Limit the amount of malloc memory since it is wired permanently into 356 * the kernel space. Even though this is accounted for in the buffer 357 * allocation, we don't want the malloced region to grow uncontrolled. 358 * The malloc scheme improves memory utilization significantly on average 359 * (small) directories. 360 */ 361 maxbufmallocspace = hibufspace / 20; 362 363 /* 364 * Reduce the chance of a deadlock occuring by limiting the number 365 * of delayed-write dirty buffers we allow to stack up. 366 */ 367 hidirtybuffers = nbuf / 4 + 20; 368 numdirtybuffers = 0; 369 /* 370 * To support extreme low-memory systems, make sure hidirtybuffers cannot 371 * eat up all available buffer space. This occurs when our minimum cannot 372 * be met. We try to size hidirtybuffers to 3/4 our buffer space assuming 373 * BKVASIZE'd (8K) buffers. We also reduce buf_maxio in this case (used 374 * by the clustering code) in an attempt to further reduce the load on 375 * the buffer cache. 376 */ 377 while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) { 378 hidirtybuffers >>= 1; 379 buf_maxio >>= 1; 380 } 381 382 /* 383 * Temporary, BKVASIZE may be manipulated soon, make sure we don't 384 * do something illegal. 
XXX 385 */ 386 #if BKVASIZE < MAXBSIZE 387 if (buf_maxio < BKVASIZE * 2) 388 buf_maxio = BKVASIZE * 2; 389 #else 390 if (buf_maxio < MAXBSIZE) 391 buf_maxio = MAXBSIZE; 392 #endif 393 394 /* 395 * Try to keep the number of free buffers in the specified range, 396 * and give the syncer access to an emergency reserve. 397 */ 398 lofreebuffers = nbuf / 18 + 5; 399 hifreebuffers = 2 * lofreebuffers; 400 numfreebuffers = nbuf; 401 402 /* 403 * Maximum number of async ops initiated per buf_daemon loop. This is 404 * somewhat of a hack at the moment, we really need to limit ourselves 405 * based on the number of bytes of I/O in-transit that were initiated 406 * from buf_daemon. 407 */ 408 if ((maxbdrun = nswbuf / 4) < 4) 409 maxbdrun = 4; 410 411 kvafreespace = 0; 412 413 bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE); 414 bogus_page = vm_page_alloc(kernel_object, 415 ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 416 VM_ALLOC_NORMAL); 417 cnt.v_wire_count++; 418 419 } 420 421 /* 422 * Free the kva allocation for a buffer 423 * Must be called only at splbio or higher, 424 * as this is the only locking for buffer_map. 425 */ 426 static void 427 bfreekva(struct buf * bp) 428 { 429 if (bp->b_kvasize) { 430 vm_map_delete(buffer_map, 431 (vm_offset_t) bp->b_kvabase, 432 (vm_offset_t) bp->b_kvabase + bp->b_kvasize 433 ); 434 bp->b_kvasize = 0; 435 kvaspacewakeup(); 436 } 437 } 438 439 /* 440 * bremfree: 441 * 442 * Remove the buffer from the appropriate free list. 443 */ 444 void 445 bremfree(struct buf * bp) 446 { 447 int s = splbio(); 448 int old_qindex = bp->b_qindex; 449 450 if (bp->b_qindex != QUEUE_NONE) { 451 if (bp->b_qindex == QUEUE_EMPTYKVA) { 452 kvafreespace -= bp->b_kvasize; 453 } 454 KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp)); 455 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); 456 bp->b_qindex = QUEUE_NONE; 457 runningbufspace += bp->b_bufsize; 458 } else { 459 #if !defined(MAX_PERF) 460 if (BUF_REFCNT(bp) <= 1) 461 panic("bremfree: removing a buffer not on a queue"); 462 #endif 463 } 464 465 /* 466 * Fixup numfreebuffers count. If the buffer is invalid or not 467 * delayed-write, and it was on the EMPTY, LRU, or AGE queues, 468 * the buffer was free and we must decrement numfreebuffers. 469 */ 470 if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) { 471 switch(old_qindex) { 472 case QUEUE_DIRTY: 473 case QUEUE_CLEAN: 474 case QUEUE_EMPTY: 475 case QUEUE_EMPTYKVA: 476 --numfreebuffers; 477 break; 478 default: 479 break; 480 } 481 } 482 splx(s); 483 } 484 485 486 /* 487 * Get a buffer with the specified data. Look in the cache first. We 488 * must clear B_ERROR and B_INVAL prior to initiating I/O. If B_CACHE 489 * is set, the buffer is valid and we do not have to do anything ( see 490 * getblk() ). 
491 */ 492 int 493 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred, 494 struct buf ** bpp) 495 { 496 struct buf *bp; 497 498 bp = getblk(vp, blkno, size, 0, 0); 499 *bpp = bp; 500 501 /* if not found in cache, do some I/O */ 502 if ((bp->b_flags & B_CACHE) == 0) { 503 if (curproc != NULL) 504 curproc->p_stats->p_ru.ru_inblock++; 505 KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp)); 506 bp->b_flags |= B_READ; 507 bp->b_flags &= ~(B_ERROR | B_INVAL); 508 if (bp->b_rcred == NOCRED) { 509 if (cred != NOCRED) 510 crhold(cred); 511 bp->b_rcred = cred; 512 } 513 vfs_busy_pages(bp, 0); 514 VOP_STRATEGY(vp, bp); 515 return (biowait(bp)); 516 } 517 return (0); 518 } 519 520 /* 521 * Operates like bread, but also starts asynchronous I/O on 522 * read-ahead blocks. We must clear B_ERROR and B_INVAL prior 523 * to initiating I/O . If B_CACHE is set, the buffer is valid 524 * and we do not have to do anything. 525 */ 526 int 527 breadn(struct vnode * vp, daddr_t blkno, int size, 528 daddr_t * rablkno, int *rabsize, 529 int cnt, struct ucred * cred, struct buf ** bpp) 530 { 531 struct buf *bp, *rabp; 532 int i; 533 int rv = 0, readwait = 0; 534 535 *bpp = bp = getblk(vp, blkno, size, 0, 0); 536 537 /* if not found in cache, do some I/O */ 538 if ((bp->b_flags & B_CACHE) == 0) { 539 if (curproc != NULL) 540 curproc->p_stats->p_ru.ru_inblock++; 541 bp->b_flags |= B_READ; 542 bp->b_flags &= ~(B_ERROR | B_INVAL); 543 if (bp->b_rcred == NOCRED) { 544 if (cred != NOCRED) 545 crhold(cred); 546 bp->b_rcred = cred; 547 } 548 vfs_busy_pages(bp, 0); 549 VOP_STRATEGY(vp, bp); 550 ++readwait; 551 } 552 553 for (i = 0; i < cnt; i++, rablkno++, rabsize++) { 554 if (inmem(vp, *rablkno)) 555 continue; 556 rabp = getblk(vp, *rablkno, *rabsize, 0, 0); 557 558 if ((rabp->b_flags & B_CACHE) == 0) { 559 if (curproc != NULL) 560 curproc->p_stats->p_ru.ru_inblock++; 561 rabp->b_flags |= B_READ | B_ASYNC; 562 rabp->b_flags &= ~(B_ERROR | B_INVAL); 563 if (rabp->b_rcred == NOCRED) { 564 if (cred != NOCRED) 565 crhold(cred); 566 rabp->b_rcred = cred; 567 } 568 vfs_busy_pages(rabp, 0); 569 BUF_KERNPROC(rabp); 570 VOP_STRATEGY(vp, rabp); 571 } else { 572 brelse(rabp); 573 } 574 } 575 576 if (readwait) { 577 rv = biowait(bp); 578 } 579 return (rv); 580 } 581 582 /* 583 * Write, release buffer on completion. (Done by iodone 584 * if async). Do not bother writing anything if the buffer 585 * is invalid. 586 * 587 * Note that we set B_CACHE here, indicating that buffer is 588 * fully valid and thus cacheable. This is true even of NFS 589 * now so we set it generally. This could be set either here 590 * or in biodone() since the I/O is synchronous. We put it 591 * here. 592 */ 593 int 594 bwrite(struct buf * bp) 595 { 596 int oldflags, s; 597 struct buf *newbp; 598 599 if (bp->b_flags & B_INVAL) { 600 brelse(bp); 601 return (0); 602 } 603 604 oldflags = bp->b_flags; 605 606 #if !defined(MAX_PERF) 607 if (BUF_REFCNT(bp) == 0) 608 panic("bwrite: buffer is not busy???"); 609 #endif 610 s = splbio(); 611 /* 612 * If a background write is already in progress, delay 613 * writing this block if it is asynchronous. Otherwise 614 * wait for the background write to complete. 
615 */ 616 if (bp->b_xflags & BX_BKGRDINPROG) { 617 if (bp->b_flags & B_ASYNC) { 618 splx(s); 619 bdwrite(bp); 620 return (0); 621 } 622 bp->b_xflags |= BX_BKGRDWAIT; 623 tsleep(&bp->b_xflags, PRIBIO, "biord", 0); 624 if (bp->b_xflags & BX_BKGRDINPROG) 625 panic("bwrite: still writing"); 626 } 627 628 /* Mark the buffer clean */ 629 bundirty(bp); 630 631 /* 632 * If this buffer is marked for background writing and we 633 * do not have to wait for it, make a copy and write the 634 * copy so as to leave this buffer ready for further use. 635 */ 636 if ((bp->b_xflags & BX_BKGRDWRITE) && (bp->b_flags & B_ASYNC)) { 637 if (bp->b_flags & B_CALL) 638 panic("bwrite: need chained iodone"); 639 640 /* get a new block */ 641 newbp = geteblk(bp->b_bufsize); 642 643 /* set it to be identical to the old block */ 644 memcpy(newbp->b_data, bp->b_data, bp->b_bufsize); 645 bgetvp(bp->b_vp, newbp); 646 newbp->b_lblkno = bp->b_lblkno; 647 newbp->b_blkno = bp->b_blkno; 648 newbp->b_offset = bp->b_offset; 649 newbp->b_iodone = vfs_backgroundwritedone; 650 newbp->b_flags |= B_ASYNC | B_CALL; 651 newbp->b_flags &= ~B_INVAL; 652 653 /* move over the dependencies */ 654 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_movedeps) 655 (*bioops.io_movedeps)(bp, newbp); 656 657 /* 658 * Initiate write on the copy, release the original to 659 * the B_LOCKED queue so that it cannot go away until 660 * the background write completes. If not locked it could go 661 * away and then be reconstituted while it was being written. 662 * If the reconstituted buffer were written, we could end up 663 * with two background copies being written at the same time. 664 */ 665 bp->b_xflags |= BX_BKGRDINPROG; 666 bp->b_flags |= B_LOCKED; 667 bqrelse(bp); 668 bp = newbp; 669 } 670 671 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR); 672 bp->b_flags |= B_WRITEINPROG | B_CACHE; 673 674 bp->b_vp->v_numoutput++; 675 vfs_busy_pages(bp, 1); 676 if (curproc != NULL) 677 curproc->p_stats->p_ru.ru_oublock++; 678 splx(s); 679 if (oldflags & B_ASYNC) 680 BUF_KERNPROC(bp); 681 VOP_STRATEGY(bp->b_vp, bp); 682 683 if ((oldflags & B_ASYNC) == 0) { 684 int rtval = biowait(bp); 685 brelse(bp); 686 return (rtval); 687 } 688 689 return (0); 690 } 691 692 /* 693 * Complete a background write started from bwrite. 694 */ 695 static void 696 vfs_backgroundwritedone(bp) 697 struct buf *bp; 698 { 699 struct buf *origbp; 700 701 /* 702 * Find the original buffer that we are writing. 703 */ 704 if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL) 705 panic("backgroundwritedone: lost buffer"); 706 /* 707 * Process dependencies then return any unfinished ones. 708 */ 709 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete) 710 (*bioops.io_complete)(bp); 711 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_movedeps) 712 (*bioops.io_movedeps)(bp, origbp); 713 /* 714 * Clear the BX_BKGRDINPROG flag in the original buffer 715 * and awaken it if it is waiting for the write to complete. 716 */ 717 origbp->b_xflags &= ~BX_BKGRDINPROG; 718 if (origbp->b_xflags & BX_BKGRDWAIT) { 719 origbp->b_xflags &= ~BX_BKGRDWAIT; 720 wakeup(&origbp->b_xflags); 721 } 722 /* 723 * Clear the B_LOCKED flag and remove it from the locked 724 * queue if it currently resides there. 725 */ 726 origbp->b_flags &= ~B_LOCKED; 727 if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 728 bremfree(origbp); 729 bqrelse(origbp); 730 } 731 /* 732 * This buffer is marked B_NOCACHE, so when it is released 733 * by biodone, it will be tossed. 
We mark it with B_READ 734 * to avoid biodone doing a second vwakeup. 735 */ 736 bp->b_flags |= B_NOCACHE | B_READ; 737 bp->b_flags &= ~(B_CACHE | B_CALL | B_DONE); 738 bp->b_iodone = 0; 739 biodone(bp); 740 } 741 742 /* 743 * Delayed write. (Buffer is marked dirty). Do not bother writing 744 * anything if the buffer is marked invalid. 745 * 746 * Note that since the buffer must be completely valid, we can safely 747 * set B_CACHE. In fact, we have to set B_CACHE here rather then in 748 * biodone() in order to prevent getblk from writing the buffer 749 * out synchronously. 750 */ 751 void 752 bdwrite(struct buf * bp) 753 { 754 #if !defined(MAX_PERF) 755 if (BUF_REFCNT(bp) == 0) 756 panic("bdwrite: buffer is not busy"); 757 #endif 758 759 if (bp->b_flags & B_INVAL) { 760 brelse(bp); 761 return; 762 } 763 bdirty(bp); 764 765 /* 766 * Set B_CACHE, indicating that the buffer is fully valid. This is 767 * true even of NFS now. 768 */ 769 bp->b_flags |= B_CACHE; 770 771 /* 772 * This bmap keeps the system from needing to do the bmap later, 773 * perhaps when the system is attempting to do a sync. Since it 774 * is likely that the indirect block -- or whatever other datastructure 775 * that the filesystem needs is still in memory now, it is a good 776 * thing to do this. Note also, that if the pageout daemon is 777 * requesting a sync -- there might not be enough memory to do 778 * the bmap then... So, this is important to do. 779 */ 780 if (bp->b_lblkno == bp->b_blkno) { 781 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 782 } 783 784 /* 785 * Set the *dirty* buffer range based upon the VM system dirty pages. 786 */ 787 vfs_setdirty(bp); 788 789 /* 790 * We need to do this here to satisfy the vnode_pager and the 791 * pageout daemon, so that it thinks that the pages have been 792 * "cleaned". Note that since the pages are in a delayed write 793 * buffer -- the VFS layer "will" see that the pages get written 794 * out on the next sync, or perhaps the cluster will be completed. 795 */ 796 vfs_clean_pages(bp); 797 bqrelse(bp); 798 799 /* 800 * Wakeup the buffer flushing daemon if we have saturated the 801 * buffer cache. 802 */ 803 804 bd_wakeup(hidirtybuffers); 805 806 /* 807 * note: we cannot initiate I/O from a bdwrite even if we wanted to, 808 * due to the softdep code. 809 */ 810 } 811 812 /* 813 * bdirty: 814 * 815 * Turn buffer into delayed write request. We must clear B_READ and 816 * B_RELBUF, and we must set B_DELWRI. We reassign the buffer to 817 * itself to properly update it in the dirty/clean lists. We mark it 818 * B_DONE to ensure that any asynchronization of the buffer properly 819 * clears B_DONE ( else a panic will occur later ). 820 * 821 * bdirty() is kinda like bdwrite() - we have to clear B_INVAL which 822 * might have been set pre-getblk(). Unlike bwrite/bdwrite, bdirty() 823 * should only be called if the buffer is known-good. 824 * 825 * Since the buffer is not on a queue, we do not update the numfreebuffers 826 * count. 827 * 828 * Must be called at splbio(). 829 * The buffer must be on QUEUE_NONE. 830 */ 831 void 832 bdirty(bp) 833 struct buf *bp; 834 { 835 KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); 836 bp->b_flags &= ~(B_READ|B_RELBUF); 837 838 if ((bp->b_flags & B_DELWRI) == 0) { 839 bp->b_flags |= B_DONE | B_DELWRI; 840 reassignbuf(bp, bp->b_vp); 841 ++numdirtybuffers; 842 bd_wakeup(hidirtybuffers); 843 } 844 } 845 846 /* 847 * bundirty: 848 * 849 * Clear B_DELWRI for buffer. 
850 * 851 * Since the buffer is not on a queue, we do not update the numfreebuffers 852 * count. 853 * 854 * Must be called at splbio(). 855 * The buffer must be on QUEUE_NONE. 856 */ 857 858 void 859 bundirty(bp) 860 struct buf *bp; 861 { 862 KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex)); 863 864 if (bp->b_flags & B_DELWRI) { 865 bp->b_flags &= ~B_DELWRI; 866 reassignbuf(bp, bp->b_vp); 867 --numdirtybuffers; 868 numdirtywakeup(); 869 } 870 /* 871 * Since it is now being written, we can clear its deferred write flag. 872 */ 873 bp->b_flags &= ~B_DEFERRED; 874 } 875 876 /* 877 * bawrite: 878 * 879 * Asynchronous write. Start output on a buffer, but do not wait for 880 * it to complete. The buffer is released when the output completes. 881 * 882 * bwrite() ( or the VOP routine anyway ) is responsible for handling 883 * B_INVAL buffers. Not us. 884 */ 885 void 886 bawrite(struct buf * bp) 887 { 888 bp->b_flags |= B_ASYNC; 889 (void) VOP_BWRITE(bp->b_vp, bp); 890 } 891 892 /* 893 * bowrite: 894 * 895 * Ordered write. Start output on a buffer, and flag it so that the 896 * device will write it in the order it was queued. The buffer is 897 * released when the output completes. bwrite() ( or the VOP routine 898 * anyway ) is responsible for handling B_INVAL buffers. 899 */ 900 int 901 bowrite(struct buf * bp) 902 { 903 bp->b_flags |= B_ORDERED | B_ASYNC; 904 return (VOP_BWRITE(bp->b_vp, bp)); 905 } 906 907 /* 908 * bwillwrite: 909 * 910 * Called prior to the locking of any vnodes when we are expecting to 911 * write. We do not want to starve the buffer cache with too many 912 * dirty buffers so we block here. By blocking prior to the locking 913 * of any vnodes we attempt to avoid the situation where a locked vnode 914 * prevents the various system daemons from flushing related buffers. 915 */ 916 917 void 918 bwillwrite(void) 919 { 920 int slop = hidirtybuffers / 10; 921 922 if (numdirtybuffers > hidirtybuffers + slop) { 923 int s; 924 925 s = splbio(); 926 while (numdirtybuffers > hidirtybuffers) { 927 bd_wakeup(hidirtybuffers); 928 needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH; 929 tsleep(&needsbuffer, (PRIBIO + 4), "flswai", 0); 930 } 931 splx(s); 932 } 933 } 934 935 /* 936 * brelse: 937 * 938 * Release a busy buffer and, if requested, free its resources. The 939 * buffer will be stashed in the appropriate bufqueue[] allowing it 940 * to be accessed later as a cache entity or reused for other purposes. 941 */ 942 void 943 brelse(struct buf * bp) 944 { 945 int s; 946 int kvawakeup = 0; 947 948 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 949 950 s = splbio(); 951 952 if (bp->b_flags & B_LOCKED) 953 bp->b_flags &= ~B_ERROR; 954 955 if ((bp->b_flags & (B_READ | B_ERROR | B_INVAL)) == B_ERROR) { 956 /* 957 * Failed write, redirty. Must clear B_ERROR to prevent 958 * pages from being scrapped. If B_INVAL is set then 959 * this case is not run and the next case is run to 960 * destroy the buffer. B_INVAL can occur if the buffer 961 * is outside the range supported by the underlying device. 962 */ 963 bp->b_flags &= ~B_ERROR; 964 bdirty(bp); 965 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) || 966 (bp->b_bufsize <= 0)) { 967 /* 968 * Either a failed I/O or we were asked to free or not 969 * cache the buffer. 
970 */ 971 bp->b_flags |= B_INVAL; 972 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate) 973 (*bioops.io_deallocate)(bp); 974 if (bp->b_flags & B_DELWRI) { 975 --numdirtybuffers; 976 numdirtywakeup(); 977 } 978 bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF); 979 if ((bp->b_flags & B_VMIO) == 0) { 980 if (bp->b_bufsize) 981 allocbuf(bp, 0); 982 if (bp->b_vp) 983 brelvp(bp); 984 } 985 } 986 987 /* 988 * We must clear B_RELBUF if B_DELWRI is set. If vfs_vmio_release() 989 * is called with B_DELWRI set, the underlying pages may wind up 990 * getting freed causing a previous write (bdwrite()) to get 'lost' 991 * because pages associated with a B_DELWRI bp are marked clean. 992 * 993 * We still allow the B_INVAL case to call vfs_vmio_release(), even 994 * if B_DELWRI is set. 995 */ 996 997 if (bp->b_flags & B_DELWRI) 998 bp->b_flags &= ~B_RELBUF; 999 1000 /* 1001 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer 1002 * constituted, not even NFS buffers now. Two flags effect this. If 1003 * B_INVAL, the struct buf is invalidated but the VM object is kept 1004 * around ( i.e. so it is trivial to reconstitute the buffer later ). 1005 * 1006 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be 1007 * invalidated. B_ERROR cannot be set for a failed write unless the 1008 * buffer is also B_INVAL because it hits the re-dirtying code above. 1009 * 1010 * Normally we can do this whether a buffer is B_DELWRI or not. If 1011 * the buffer is an NFS buffer, it is tracking piecemeal writes or 1012 * the commit state and we cannot afford to lose the buffer. If the 1013 * buffer has a background write in progress, we need to keep it 1014 * around to prevent it from being reconstituted and starting a second 1015 * background write. 1016 */ 1017 if ((bp->b_flags & B_VMIO) 1018 && !(bp->b_vp->v_tag == VT_NFS && 1019 !vn_isdisk(bp->b_vp, NULL) && 1020 (bp->b_flags & B_DELWRI) && 1021 (bp->b_xflags & BX_BKGRDINPROG)) 1022 ) { 1023 1024 int i, j, resid; 1025 vm_page_t m; 1026 off_t foff; 1027 vm_pindex_t poff; 1028 vm_object_t obj; 1029 struct vnode *vp; 1030 1031 vp = bp->b_vp; 1032 1033 /* 1034 * Get the base offset and length of the buffer. Note that 1035 * for block sizes that are less then PAGE_SIZE, the b_data 1036 * base of the buffer does not represent exactly b_offset and 1037 * neither b_offset nor b_size are necessarily page aligned. 1038 * Instead, the starting position of b_offset is: 1039 * 1040 * b_data + (b_offset & PAGE_MASK) 1041 * 1042 * block sizes less then DEV_BSIZE (usually 512) are not 1043 * supported due to the page granularity bits (m->valid, 1044 * m->dirty, etc...). 1045 * 1046 * See man buf(9) for more information 1047 */ 1048 1049 resid = bp->b_bufsize; 1050 foff = bp->b_offset; 1051 1052 for (i = 0; i < bp->b_npages; i++) { 1053 m = bp->b_pages[i]; 1054 vm_page_flag_clear(m, PG_ZERO); 1055 if (m == bogus_page) { 1056 1057 obj = (vm_object_t) vp->v_object; 1058 poff = OFF_TO_IDX(bp->b_offset); 1059 1060 for (j = i; j < bp->b_npages; j++) { 1061 m = bp->b_pages[j]; 1062 if (m == bogus_page) { 1063 m = vm_page_lookup(obj, poff + j); 1064 #if !defined(MAX_PERF) 1065 if (!m) { 1066 panic("brelse: page missing\n"); 1067 } 1068 #endif 1069 bp->b_pages[j] = m; 1070 } 1071 } 1072 1073 if ((bp->b_flags & B_INVAL) == 0) { 1074 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 1075 } 1076 } 1077 if (bp->b_flags & (B_NOCACHE|B_ERROR)) { 1078 int poffset = foff & PAGE_MASK; 1079 int presid = resid > (PAGE_SIZE - poffset) ? 
1080 (PAGE_SIZE - poffset) : resid; 1081 1082 KASSERT(presid >= 0, ("brelse: extra page")); 1083 vm_page_set_invalid(m, poffset, presid); 1084 } 1085 resid -= PAGE_SIZE - (foff & PAGE_MASK); 1086 foff = (foff + PAGE_SIZE) & ~PAGE_MASK; 1087 } 1088 1089 if (bp->b_flags & (B_INVAL | B_RELBUF)) 1090 vfs_vmio_release(bp); 1091 1092 } else if (bp->b_flags & B_VMIO) { 1093 1094 if (bp->b_flags & (B_INVAL | B_RELBUF)) 1095 vfs_vmio_release(bp); 1096 1097 } 1098 1099 #if !defined(MAX_PERF) 1100 if (bp->b_qindex != QUEUE_NONE) 1101 panic("brelse: free buffer onto another queue???"); 1102 #endif 1103 if (BUF_REFCNT(bp) > 1) { 1104 /* Temporary panic to verify exclusive locking */ 1105 /* This panic goes away when we allow shared refs */ 1106 panic("brelse: multiple refs"); 1107 /* do not release to free list */ 1108 BUF_UNLOCK(bp); 1109 splx(s); 1110 return; 1111 } 1112 1113 /* enqueue */ 1114 1115 /* buffers with no memory */ 1116 if (bp->b_bufsize == 0) { 1117 bp->b_flags |= B_INVAL; 1118 bp->b_xflags &= ~BX_BKGRDWRITE; 1119 if (bp->b_xflags & BX_BKGRDINPROG) 1120 panic("losing buffer 1"); 1121 if (bp->b_kvasize) { 1122 bp->b_qindex = QUEUE_EMPTYKVA; 1123 kvawakeup = 1; 1124 } else { 1125 bp->b_qindex = QUEUE_EMPTY; 1126 } 1127 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist); 1128 LIST_REMOVE(bp, b_hash); 1129 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1130 bp->b_dev = NODEV; 1131 kvafreespace += bp->b_kvasize; 1132 /* buffers with junk contents */ 1133 } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) { 1134 bp->b_flags |= B_INVAL; 1135 bp->b_xflags &= ~BX_BKGRDWRITE; 1136 if (bp->b_xflags & BX_BKGRDINPROG) 1137 panic("losing buffer 2"); 1138 bp->b_qindex = QUEUE_CLEAN; 1139 if (bp->b_kvasize) 1140 kvawakeup = 1; 1141 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist); 1142 LIST_REMOVE(bp, b_hash); 1143 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1144 bp->b_dev = NODEV; 1145 1146 /* buffers that are locked */ 1147 } else if (bp->b_flags & B_LOCKED) { 1148 bp->b_qindex = QUEUE_LOCKED; 1149 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist); 1150 1151 /* remaining buffers */ 1152 } else { 1153 switch(bp->b_flags & (B_DELWRI|B_AGE)) { 1154 case B_DELWRI | B_AGE: 1155 bp->b_qindex = QUEUE_DIRTY; 1156 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_DIRTY], bp, b_freelist); 1157 break; 1158 case B_DELWRI: 1159 bp->b_qindex = QUEUE_DIRTY; 1160 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist); 1161 break; 1162 case B_AGE: 1163 bp->b_qindex = QUEUE_CLEAN; 1164 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist); 1165 if (bp->b_kvasize) 1166 kvawakeup = 1; 1167 break; 1168 default: 1169 bp->b_qindex = QUEUE_CLEAN; 1170 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist); 1171 if (bp->b_kvasize) 1172 kvawakeup = 1; 1173 break; 1174 } 1175 } 1176 1177 /* 1178 * If B_INVAL, clear B_DELWRI. We've already placed the buffer 1179 * on the correct queue. 1180 */ 1181 if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) { 1182 bp->b_flags &= ~B_DELWRI; 1183 --numdirtybuffers; 1184 numdirtywakeup(); 1185 } 1186 1187 runningbufspace -= bp->b_bufsize; 1188 1189 /* 1190 * Fixup numfreebuffers count. The bp is on an appropriate queue 1191 * unless locked. We then bump numfreebuffers if it is not B_DELWRI. 1192 * We've already handled the B_INVAL case ( B_DELWRI will be clear 1193 * if B_INVAL is set ). 1194 */ 1195 1196 if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI)) 1197 bufcountwakeup(); 1198 1199 /* 1200 * Something we can maybe free. 
1201 */ 1202 1203 if (bp->b_bufsize) 1204 bufspacewakeup(); 1205 if (kvawakeup) 1206 kvaspacewakeup(); 1207 1208 /* unlock */ 1209 BUF_UNLOCK(bp); 1210 bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 1211 splx(s); 1212 } 1213 1214 /* 1215 * Release a buffer back to the appropriate queue but do not try to free 1216 * it. 1217 * 1218 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by 1219 * biodone() to requeue an async I/O on completion. It is also used when 1220 * known good buffers need to be requeued but we think we may need the data 1221 * again soon. 1222 */ 1223 void 1224 bqrelse(struct buf * bp) 1225 { 1226 int s; 1227 1228 s = splbio(); 1229 1230 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1231 1232 #if !defined(MAX_PERF) 1233 if (bp->b_qindex != QUEUE_NONE) 1234 panic("bqrelse: free buffer onto another queue???"); 1235 #endif 1236 if (BUF_REFCNT(bp) > 1) { 1237 /* do not release to free list */ 1238 panic("bqrelse: multiple refs"); 1239 BUF_UNLOCK(bp); 1240 splx(s); 1241 return; 1242 } 1243 if (bp->b_flags & B_LOCKED) { 1244 bp->b_flags &= ~B_ERROR; 1245 bp->b_qindex = QUEUE_LOCKED; 1246 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist); 1247 /* buffers with stale but valid contents */ 1248 } else if (bp->b_flags & B_DELWRI) { 1249 bp->b_qindex = QUEUE_DIRTY; 1250 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist); 1251 } else { 1252 bp->b_qindex = QUEUE_CLEAN; 1253 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist); 1254 } 1255 1256 runningbufspace -= bp->b_bufsize; 1257 1258 if ((bp->b_flags & B_LOCKED) == 0 && 1259 ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))) { 1260 bufcountwakeup(); 1261 } 1262 1263 /* 1264 * Something we can maybe wakeup 1265 */ 1266 if (bp->b_bufsize && !(bp->b_flags & B_DELWRI)) 1267 bufspacewakeup(); 1268 1269 /* unlock */ 1270 BUF_UNLOCK(bp); 1271 bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 1272 splx(s); 1273 } 1274 1275 static void 1276 vfs_vmio_release(bp) 1277 struct buf *bp; 1278 { 1279 int i, s; 1280 vm_page_t m; 1281 1282 s = splvm(); 1283 for (i = 0; i < bp->b_npages; i++) { 1284 m = bp->b_pages[i]; 1285 bp->b_pages[i] = NULL; 1286 /* 1287 * In order to keep page LRU ordering consistent, put 1288 * everything on the inactive queue. 1289 */ 1290 vm_page_unwire(m, 0); 1291 /* 1292 * We don't mess with busy pages, it is 1293 * the responsibility of the process that 1294 * busied the pages to deal with them. 1295 */ 1296 if ((m->flags & PG_BUSY) || (m->busy != 0)) 1297 continue; 1298 1299 if (m->wire_count == 0) { 1300 vm_page_flag_clear(m, PG_ZERO); 1301 /* 1302 * Might as well free the page if we can and it has 1303 * no valid data. 1304 */ 1305 if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) { 1306 vm_page_busy(m); 1307 vm_page_protect(m, VM_PROT_NONE); 1308 vm_page_free(m); 1309 } 1310 } 1311 } 1312 bufspace -= bp->b_bufsize; 1313 vmiospace -= bp->b_bufsize; 1314 runningbufspace -= bp->b_bufsize; 1315 splx(s); 1316 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 1317 if (bp->b_bufsize) 1318 bufspacewakeup(); 1319 bp->b_npages = 0; 1320 bp->b_bufsize = 0; 1321 bp->b_flags &= ~B_VMIO; 1322 if (bp->b_vp) 1323 brelvp(bp); 1324 } 1325 1326 /* 1327 * Check to see if a block is currently memory resident. 
1328 */ 1329 struct buf * 1330 gbincore(struct vnode * vp, daddr_t blkno) 1331 { 1332 struct buf *bp; 1333 struct bufhashhdr *bh; 1334 1335 bh = bufhash(vp, blkno); 1336 1337 /* Search hash chain */ 1338 LIST_FOREACH(bp, bh, b_hash) { 1339 /* hit */ 1340 if (bp->b_vp == vp && bp->b_lblkno == blkno && 1341 (bp->b_flags & B_INVAL) == 0) { 1342 break; 1343 } 1344 } 1345 return (bp); 1346 } 1347 1348 /* 1349 * vfs_bio_awrite: 1350 * 1351 * Implement clustered async writes for clearing out B_DELWRI buffers. 1352 * This is much better then the old way of writing only one buffer at 1353 * a time. Note that we may not be presented with the buffers in the 1354 * correct order, so we search for the cluster in both directions. 1355 */ 1356 int 1357 vfs_bio_awrite(struct buf * bp) 1358 { 1359 int i; 1360 int j; 1361 daddr_t lblkno = bp->b_lblkno; 1362 struct vnode *vp = bp->b_vp; 1363 int s; 1364 int ncl; 1365 struct buf *bpa; 1366 int nwritten; 1367 int size; 1368 int maxcl; 1369 1370 s = splbio(); 1371 /* 1372 * right now we support clustered writing only to regular files. If 1373 * we find a clusterable block we could be in the middle of a cluster 1374 * rather then at the beginning. 1375 */ 1376 if ((vp->v_type == VREG) && 1377 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 1378 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 1379 1380 size = vp->v_mount->mnt_stat.f_iosize; 1381 maxcl = MAXPHYS / size; 1382 1383 for (i = 1; i < maxcl; i++) { 1384 if ((bpa = gbincore(vp, lblkno + i)) && 1385 BUF_REFCNT(bpa) == 0 && 1386 ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) == 1387 (B_DELWRI | B_CLUSTEROK)) && 1388 (bpa->b_bufsize == size)) { 1389 if ((bpa->b_blkno == bpa->b_lblkno) || 1390 (bpa->b_blkno != 1391 bp->b_blkno + ((i * size) >> DEV_BSHIFT))) 1392 break; 1393 } else { 1394 break; 1395 } 1396 } 1397 for (j = 1; i + j <= maxcl && j <= lblkno; j++) { 1398 if ((bpa = gbincore(vp, lblkno - j)) && 1399 BUF_REFCNT(bpa) == 0 && 1400 ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) == 1401 (B_DELWRI | B_CLUSTEROK)) && 1402 (bpa->b_bufsize == size)) { 1403 if ((bpa->b_blkno == bpa->b_lblkno) || 1404 (bpa->b_blkno != 1405 bp->b_blkno - ((j * size) >> DEV_BSHIFT))) 1406 break; 1407 } else { 1408 break; 1409 } 1410 } 1411 --j; 1412 ncl = i + j; 1413 /* 1414 * this is a possible cluster write 1415 */ 1416 if (ncl != 1) { 1417 nwritten = cluster_wbuild(vp, size, lblkno - j, ncl); 1418 splx(s); 1419 return nwritten; 1420 } 1421 } 1422 1423 BUF_LOCK(bp, LK_EXCLUSIVE); 1424 bremfree(bp); 1425 bp->b_flags |= B_ASYNC; 1426 1427 splx(s); 1428 /* 1429 * default (old) behavior, writing out only one block 1430 * 1431 * XXX returns b_bufsize instead of b_bcount for nwritten? 1432 */ 1433 nwritten = bp->b_bufsize; 1434 (void) VOP_BWRITE(bp->b_vp, bp); 1435 1436 return nwritten; 1437 } 1438 1439 /* 1440 * getnewbuf: 1441 * 1442 * Find and initialize a new buffer header, freeing up existing buffers 1443 * in the bufqueues as necessary. The new buffer is returned locked. 1444 * 1445 * Important: B_INVAL is not set. If the caller wishes to throw the 1446 * buffer away, the caller must set B_INVAL prior to calling brelse(). 1447 * 1448 * We block if: 1449 * We have insufficient buffer headers 1450 * We have insufficient buffer space 1451 * buffer_map is too fragmented ( space reservation fails ) 1452 * If we have to flush dirty buffers ( but we try to avoid this ) 1453 * 1454 * To avoid VFS layer recursion we do not flush dirty buffers ourselves. 
1455 * Instead we ask the buf daemon to do it for us. We attempt to 1456 * avoid piecemeal wakeups of the pageout daemon. 1457 */ 1458 1459 static struct buf * 1460 getnewbuf(int slpflag, int slptimeo, int size, int maxsize) 1461 { 1462 struct buf *bp; 1463 struct buf *nbp; 1464 struct buf *dbp; 1465 int outofspace; 1466 int nqindex; 1467 int defrag = 0; 1468 1469 ++getnewbufcalls; 1470 --getnewbufrestarts; 1471 restart: 1472 ++getnewbufrestarts; 1473 1474 /* 1475 * Calculate whether we are out of buffer space. This state is 1476 * recalculated on every restart. If we are out of space, we 1477 * have to turn off defragmentation. Setting defrag to -1 when 1478 * outofspace is positive means "defrag while freeing buffers". 1479 * The looping conditional will be muffed up if defrag is left 1480 * positive when outofspace is positive. 1481 */ 1482 1483 dbp = NULL; 1484 outofspace = 0; 1485 if (bufspace >= hibufspace) { 1486 if ((curproc && (curproc->p_flag & P_BUFEXHAUST) == 0) || 1487 bufspace >= maxbufspace) { 1488 outofspace = 1; 1489 if (defrag > 0) 1490 defrag = -1; 1491 } 1492 } 1493 1494 /* 1495 * defrag state is semi-persistant. 1 means we are flagged for 1496 * defragging. -1 means we actually defragged something. 1497 */ 1498 /* nop */ 1499 1500 /* 1501 * Setup for scan. If we do not have enough free buffers, 1502 * we setup a degenerate case that immediately fails. Note 1503 * that if we are specially marked process, we are allowed to 1504 * dip into our reserves. 1505 * 1506 * Normally we want to find an EMPTYKVA buffer. That is, a 1507 * buffer with kva already allocated. If there are no EMPTYKVA 1508 * buffers we back up to the truely EMPTY buffers. When defragging 1509 * we do not bother backing up since we have to locate buffers with 1510 * kva to defrag. If we are out of space we skip both EMPTY and 1511 * EMPTYKVA and dig right into the CLEAN queue. 1512 * 1513 * In this manner we avoid scanning unnecessary buffers. It is very 1514 * important for us to do this because the buffer cache is almost 1515 * constantly out of space or in need of defragmentation. 1516 */ 1517 1518 if (curproc && (curproc->p_flag & P_BUFEXHAUST) == 0 && 1519 numfreebuffers < lofreebuffers) { 1520 nqindex = QUEUE_CLEAN; 1521 nbp = NULL; 1522 } else { 1523 nqindex = QUEUE_EMPTYKVA; 1524 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]); 1525 if (nbp == NULL) { 1526 if (defrag <= 0) { 1527 nqindex = QUEUE_EMPTY; 1528 nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 1529 } 1530 } 1531 if (outofspace || nbp == NULL) { 1532 nqindex = QUEUE_CLEAN; 1533 nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]); 1534 } 1535 } 1536 1537 /* 1538 * Run scan, possibly freeing data and/or kva mappings on the fly 1539 * depending. 1540 */ 1541 1542 while ((bp = nbp) != NULL) { 1543 int qindex = nqindex; 1544 1545 /* 1546 * Calculate next bp ( we can only use it if we do not block 1547 * or do other fancy things ). 1548 */ 1549 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) { 1550 switch(qindex) { 1551 case QUEUE_EMPTY: 1552 nqindex = QUEUE_EMPTYKVA; 1553 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]))) 1554 break; 1555 /* fall through */ 1556 case QUEUE_EMPTYKVA: 1557 nqindex = QUEUE_CLEAN; 1558 if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]))) 1559 break; 1560 /* fall through */ 1561 case QUEUE_CLEAN: 1562 /* 1563 * nbp is NULL. 
1564 */ 1565 break; 1566 } 1567 } 1568 1569 /* 1570 * Sanity Checks 1571 */ 1572 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp)); 1573 1574 /* 1575 * Note: we no longer distinguish between VMIO and non-VMIO 1576 * buffers. 1577 */ 1578 1579 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex)); 1580 1581 /* 1582 * If we are defragging and the buffer isn't useful for fixing 1583 * that problem we continue. If we are out of space and the 1584 * buffer isn't useful for fixing that problem we continue. 1585 */ 1586 1587 if (defrag > 0 && bp->b_kvasize == 0) 1588 continue; 1589 if (outofspace > 0 && bp->b_bufsize == 0) 1590 continue; 1591 1592 /* 1593 * Start freeing the bp. This is somewhat involved. nbp 1594 * remains valid only for QUEUE_EMPTY[KVA] bp's. 1595 */ 1596 1597 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) 1598 panic("getnewbuf: locked buf"); 1599 bremfree(bp); 1600 1601 if (qindex == QUEUE_CLEAN) { 1602 if (bp->b_flags & B_VMIO) { 1603 bp->b_flags &= ~B_ASYNC; 1604 vfs_vmio_release(bp); 1605 } 1606 if (bp->b_vp) 1607 brelvp(bp); 1608 } 1609 1610 /* 1611 * NOTE: nbp is now entirely invalid. We can only restart 1612 * the scan from this point on. 1613 * 1614 * Get the rest of the buffer freed up. b_kva* is still 1615 * valid after this operation. 1616 */ 1617 1618 if (bp->b_rcred != NOCRED) { 1619 crfree(bp->b_rcred); 1620 bp->b_rcred = NOCRED; 1621 } 1622 if (bp->b_wcred != NOCRED) { 1623 crfree(bp->b_wcred); 1624 bp->b_wcred = NOCRED; 1625 } 1626 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate) 1627 (*bioops.io_deallocate)(bp); 1628 if (bp->b_xflags & BX_BKGRDINPROG) 1629 panic("losing buffer 3"); 1630 LIST_REMOVE(bp, b_hash); 1631 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1632 1633 if (bp->b_bufsize) 1634 allocbuf(bp, 0); 1635 1636 bp->b_flags = 0; 1637 bp->b_xflags = 0; 1638 bp->b_dev = NODEV; 1639 bp->b_vp = NULL; 1640 bp->b_blkno = bp->b_lblkno = 0; 1641 bp->b_offset = NOOFFSET; 1642 bp->b_iodone = 0; 1643 bp->b_error = 0; 1644 bp->b_resid = 0; 1645 bp->b_bcount = 0; 1646 bp->b_npages = 0; 1647 bp->b_dirtyoff = bp->b_dirtyend = 0; 1648 1649 LIST_INIT(&bp->b_dep); 1650 1651 /* 1652 * Ok, now that we have a free buffer, if we are defragging 1653 * we have to recover the kvaspace. If we are out of space 1654 * we have to free the buffer (which we just did), but we 1655 * do not have to recover kva space unless we hit a defrag 1656 * hicup. Being able to avoid freeing the kva space leads 1657 * to a significant reduction in overhead. 1658 */ 1659 1660 if (defrag > 0) { 1661 defrag = -1; 1662 bp->b_flags |= B_INVAL; 1663 bfreekva(bp); 1664 brelse(bp); 1665 goto restart; 1666 } 1667 1668 if (outofspace > 0) { 1669 outofspace = -1; 1670 bp->b_flags |= B_INVAL; 1671 if (defrag < 0) 1672 bfreekva(bp); 1673 brelse(bp); 1674 goto restart; 1675 } 1676 1677 /* 1678 * We are done 1679 */ 1680 break; 1681 } 1682 1683 /* 1684 * If we exhausted our list, sleep as appropriate. We may have to 1685 * wakeup various daemons and write out some dirty buffers. 1686 * 1687 * Generally we are sleeping due to insufficient buffer space. 
1688 */ 1689 1690 if (bp == NULL) { 1691 int flags; 1692 char *waitmsg; 1693 1694 if (defrag > 0) { 1695 flags = VFS_BIO_NEED_KVASPACE; 1696 waitmsg = "nbufkv"; 1697 } else if (outofspace > 0) { 1698 waitmsg = "nbufbs"; 1699 flags = VFS_BIO_NEED_BUFSPACE; 1700 } else { 1701 waitmsg = "newbuf"; 1702 flags = VFS_BIO_NEED_ANY; 1703 } 1704 1705 bd_speedup(); /* heeeelp */ 1706 1707 needsbuffer |= flags; 1708 while (needsbuffer & flags) { 1709 if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, 1710 waitmsg, slptimeo)) 1711 return (NULL); 1712 } 1713 } else { 1714 /* 1715 * We finally have a valid bp. We aren't quite out of the 1716 * woods, we still have to reserve kva space. 1717 */ 1718 vm_offset_t addr = 0; 1719 1720 maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK; 1721 1722 if (maxsize != bp->b_kvasize) { 1723 bfreekva(bp); 1724 1725 if (vm_map_findspace(buffer_map, 1726 vm_map_min(buffer_map), maxsize, &addr)) { 1727 /* 1728 * Uh oh. Buffer map is to fragmented. Try 1729 * to defragment. 1730 */ 1731 if (defrag <= 0) { 1732 defrag = 1; 1733 bp->b_flags |= B_INVAL; 1734 brelse(bp); 1735 goto restart; 1736 } 1737 /* 1738 * Uh oh. We couldn't seem to defragment 1739 */ 1740 panic("getnewbuf: unreachable code reached"); 1741 } 1742 } 1743 if (addr) { 1744 vm_map_insert(buffer_map, NULL, 0, 1745 addr, addr + maxsize, 1746 VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); 1747 1748 bp->b_kvabase = (caddr_t) addr; 1749 bp->b_kvasize = maxsize; 1750 } 1751 bp->b_data = bp->b_kvabase; 1752 } 1753 return(bp); 1754 } 1755 1756 /* 1757 * waitfreebuffers: 1758 * 1759 * Wait for sufficient free buffers. Only called from normal processes. 1760 */ 1761 1762 static void 1763 waitfreebuffers(int slpflag, int slptimeo) 1764 { 1765 while (numfreebuffers < hifreebuffers) { 1766 if (numfreebuffers >= hifreebuffers) 1767 break; 1768 needsbuffer |= VFS_BIO_NEED_FREE; 1769 if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo)) 1770 break; 1771 } 1772 } 1773 1774 /* 1775 * buf_daemon: 1776 * 1777 * buffer flushing daemon. Buffers are normally flushed by the 1778 * update daemon but if it cannot keep up this process starts to 1779 * take the load in an attempt to prevent getnewbuf() from blocking. 1780 */ 1781 1782 static struct proc *bufdaemonproc; 1783 static int bd_interval; 1784 static int bd_flushto; 1785 static int bd_flushinc; 1786 1787 static struct kproc_desc buf_kp = { 1788 "bufdaemon", 1789 buf_daemon, 1790 &bufdaemonproc 1791 }; 1792 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp) 1793 1794 static void 1795 buf_daemon() 1796 { 1797 int s; 1798 1799 /* 1800 * This process needs to be suspended prior to shutdown sync. 1801 */ 1802 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, bufdaemonproc, 1803 SHUTDOWN_PRI_LAST); 1804 1805 /* 1806 * This process is allowed to take the buffer cache to the limit 1807 */ 1808 curproc->p_flag |= P_BUFEXHAUST; 1809 s = splbio(); 1810 1811 bd_interval = 5 * hz; /* dynamically adjusted */ 1812 bd_flushto = hidirtybuffers; /* dynamically adjusted */ 1813 bd_flushinc = 1; 1814 1815 for (;;) { 1816 kproc_suspend_loop(bufdaemonproc); 1817 1818 bd_request = 0; 1819 1820 /* 1821 * Do the flush. Limit the number of buffers we flush in one 1822 * go. The failure condition occurs when processes are writing 1823 * buffers faster then we can dispose of them. In this case 1824 * we may be flushing so often that the previous set of flushes 1825 * have not had time to complete, causing us to run out of 1826 * physical buffers and block. 
1827 */ 1828 { 1829 int runcount = maxbdrun; 1830 1831 while (numdirtybuffers > bd_flushto && runcount) { 1832 --runcount; 1833 if (flushbufqueues() == 0) 1834 break; 1835 } 1836 } 1837 1838 if (bd_request || 1839 tsleep(&bd_request, PVM, "psleep", bd_interval) == 0) { 1840 /* 1841 * Another request is pending or we were woken up 1842 * without timing out. Flush more. 1843 */ 1844 --bd_flushto; 1845 if (bd_flushto >= numdirtybuffers - 5) { 1846 bd_flushto = numdirtybuffers - 10; 1847 bd_flushinc = 1; 1848 } 1849 if (bd_flushto < 2) 1850 bd_flushto = 2; 1851 } else { 1852 /* 1853 * We slept and timed out, we can slow down. 1854 */ 1855 bd_flushto += bd_flushinc; 1856 if (bd_flushto > hidirtybuffers) 1857 bd_flushto = hidirtybuffers; 1858 ++bd_flushinc; 1859 if (bd_flushinc > hidirtybuffers / 20 + 1) 1860 bd_flushinc = hidirtybuffers / 20 + 1; 1861 } 1862 1863 /* 1864 * Set the interval on a linear scale based on hidirtybuffers 1865 * with a maximum frequency of 1/10 second. 1866 */ 1867 bd_interval = bd_flushto * 5 * hz / hidirtybuffers; 1868 if (bd_interval < hz / 10) 1869 bd_interval = hz / 10; 1870 } 1871 } 1872 1873 /* 1874 * flushbufqueues: 1875 * 1876 * Try to flush a buffer in the dirty queue. We must be careful to 1877 * free up B_INVAL buffers instead of write them, which NFS is 1878 * particularly sensitive to. 1879 */ 1880 1881 static int 1882 flushbufqueues(void) 1883 { 1884 struct buf *bp; 1885 int r = 0; 1886 1887 bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]); 1888 1889 while (bp) { 1890 KASSERT((bp->b_flags & B_DELWRI), ("unexpected clean buffer %p", bp)); 1891 if ((bp->b_flags & B_DELWRI) != 0 && 1892 (bp->b_xflags & BX_BKGRDINPROG) == 0) { 1893 if (bp->b_flags & B_INVAL) { 1894 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) 1895 panic("flushbufqueues: locked buf"); 1896 bremfree(bp); 1897 brelse(bp); 1898 ++r; 1899 break; 1900 } 1901 if (LIST_FIRST(&bp->b_dep) != NULL && 1902 bioops.io_countdeps && 1903 (bp->b_flags & B_DEFERRED) == 0 && 1904 (*bioops.io_countdeps)(bp, 0)) { 1905 TAILQ_REMOVE(&bufqueues[QUEUE_DIRTY], 1906 bp, b_freelist); 1907 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], 1908 bp, b_freelist); 1909 bp->b_flags |= B_DEFERRED; 1910 bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]); 1911 continue; 1912 } 1913 vfs_bio_awrite(bp); 1914 ++r; 1915 break; 1916 } 1917 bp = TAILQ_NEXT(bp, b_freelist); 1918 } 1919 return (r); 1920 } 1921 1922 /* 1923 * Check to see if a block is currently memory resident. 1924 */ 1925 struct buf * 1926 incore(struct vnode * vp, daddr_t blkno) 1927 { 1928 struct buf *bp; 1929 1930 int s = splbio(); 1931 bp = gbincore(vp, blkno); 1932 splx(s); 1933 return (bp); 1934 } 1935 1936 /* 1937 * Returns true if no I/O is needed to access the 1938 * associated VM object. This is like incore except 1939 * it also hunts around in the VM system for the data. 
1940 */ 1941 1942 int 1943 inmem(struct vnode * vp, daddr_t blkno) 1944 { 1945 vm_object_t obj; 1946 vm_offset_t toff, tinc, size; 1947 vm_page_t m; 1948 vm_ooffset_t off; 1949 1950 if (incore(vp, blkno)) 1951 return 1; 1952 if (vp->v_mount == NULL) 1953 return 0; 1954 if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0) 1955 return 0; 1956 1957 obj = vp->v_object; 1958 size = PAGE_SIZE; 1959 if (size > vp->v_mount->mnt_stat.f_iosize) 1960 size = vp->v_mount->mnt_stat.f_iosize; 1961 off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize; 1962 1963 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 1964 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 1965 if (!m) 1966 return 0; 1967 tinc = size; 1968 if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK)) 1969 tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK); 1970 if (vm_page_is_valid(m, 1971 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0) 1972 return 0; 1973 } 1974 return 1; 1975 } 1976 1977 /* 1978 * vfs_setdirty: 1979 * 1980 * Sets the dirty range for a buffer based on the status of the dirty 1981 * bits in the pages comprising the buffer. 1982 * 1983 * The range is limited to the size of the buffer. 1984 * 1985 * This routine is primarily used by NFS, but is generalized for the 1986 * B_VMIO case. 1987 */ 1988 static void 1989 vfs_setdirty(struct buf *bp) 1990 { 1991 int i; 1992 vm_object_t object; 1993 1994 /* 1995 * Degenerate case - empty buffer 1996 */ 1997 1998 if (bp->b_bufsize == 0) 1999 return; 2000 2001 /* 2002 * We qualify the scan for modified pages on whether the 2003 * object has been flushed yet. The OBJ_WRITEABLE flag 2004 * is not cleared simply by protecting pages off. 2005 */ 2006 2007 if ((bp->b_flags & B_VMIO) == 0) 2008 return; 2009 2010 object = bp->b_pages[0]->object; 2011 2012 if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY)) 2013 printf("Warning: object %p writeable but not mightbedirty\n", object); 2014 if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY)) 2015 printf("Warning: object %p mightbedirty but not writeable\n", object); 2016 2017 if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) { 2018 vm_offset_t boffset; 2019 vm_offset_t eoffset; 2020 2021 /* 2022 * test the pages to see if they have been modified directly 2023 * by users through the VM system. 2024 */ 2025 for (i = 0; i < bp->b_npages; i++) { 2026 vm_page_flag_clear(bp->b_pages[i], PG_ZERO); 2027 vm_page_test_dirty(bp->b_pages[i]); 2028 } 2029 2030 /* 2031 * Calculate the encompassing dirty range, boffset and eoffset, 2032 * (eoffset - boffset) bytes. 2033 */ 2034 2035 for (i = 0; i < bp->b_npages; i++) { 2036 if (bp->b_pages[i]->dirty) 2037 break; 2038 } 2039 boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2040 2041 for (i = bp->b_npages - 1; i >= 0; --i) { 2042 if (bp->b_pages[i]->dirty) { 2043 break; 2044 } 2045 } 2046 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); 2047 2048 /* 2049 * Fit it to the buffer. 2050 */ 2051 2052 if (eoffset > bp->b_bcount) 2053 eoffset = bp->b_bcount; 2054 2055 /* 2056 * If we have a good dirty range, merge with the existing 2057 * dirty range. 2058 */ 2059 2060 if (boffset < eoffset) { 2061 if (bp->b_dirtyoff > boffset) 2062 bp->b_dirtyoff = boffset; 2063 if (bp->b_dirtyend < eoffset) 2064 bp->b_dirtyend = eoffset; 2065 } 2066 } 2067 } 2068 2069 /* 2070 * getblk: 2071 * 2072 * Get a block given a specified block and offset into a file/device. 
2073 * The buffer's B_DONE bit will be cleared on return, making it almost
2074 * ready for an I/O initiation. B_INVAL may or may not be set on
2075 * return. The caller should clear B_INVAL prior to initiating a
2076 * READ.
2077 *
2078 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2079 * an existing buffer.
2080 *
2081 * For a VMIO buffer, B_CACHE is modified according to the backing VM.
2082 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2083 * and then cleared based on the backing VM. If the previous buffer is
2084 * non-0-sized but invalid, B_CACHE will be cleared.
2085 *
2086 * If getblk() must create a new buffer, the new buffer is returned with
2087 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2088 * case it is returned with B_INVAL clear and B_CACHE set based on the
2089 * backing VM.
2090 *
2091 * getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whose
2092 * B_CACHE bit is clear.
2093 *
2094 * What this means, basically, is that the caller should use B_CACHE to
2095 * determine whether the buffer is fully valid or not and should clear
2096 * B_INVAL prior to issuing a read. If the caller intends to validate
2097 * the buffer by loading its data area with something, the caller needs
2098 * to clear B_INVAL. If the caller does this without issuing an I/O,
2099 * the caller should set B_CACHE ( as an optimization ), else the caller
2100 * should issue the I/O and biodone() will set B_CACHE if the I/O was
2101 * a write attempt or if it was a successful read. If the caller
2102 * intends to issue a READ, the caller must clear B_INVAL and B_ERROR
2103 * prior to issuing the READ. biodone() will *not* clear B_INVAL.
2104 */
2105 struct buf *
2106 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
2107 {
2108 struct buf *bp;
2109 int s;
2110 struct bufhashhdr *bh;
2111
2112 #if !defined(MAX_PERF)
2113 if (size > MAXBSIZE)
2114 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
2115 #endif
2116
2117 s = splbio();
2118 loop:
2119 /*
2120 * Block if we are low on buffers. Certain processes are allowed
2121 * to completely exhaust the buffer cache.
2122 *
2123 * If this check ever becomes a bottleneck it may be better to
2124 * move it into the else, when gbincore() fails. At the moment
2125 * it isn't a problem.
2126 */
2127 if (!curproc || (curproc->p_flag & P_BUFEXHAUST)) {
2128 if (numfreebuffers == 0) {
2129 if (!curproc)
2130 return NULL;
2131 needsbuffer |= VFS_BIO_NEED_ANY;
2132 tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
2133 slptimeo);
2134 }
2135 } else if (numfreebuffers < lofreebuffers) {
2136 waitfreebuffers(slpflag, slptimeo);
2137 }
2138
2139 if ((bp = gbincore(vp, blkno))) {
2140 /*
2141 * Buffer is in-core. If the buffer is not busy, it must
2142 * be on a queue.
2143 */
2144
2145 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2146 if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2147 "getblk", slpflag, slptimeo) == ENOLCK)
2148 goto loop;
2149 splx(s);
2150 return (struct buf *) NULL;
2151 }
2152
2153 /*
2154 * The buffer is locked. B_CACHE is cleared if the buffer is
2155 * invalid. Otherwise, for a non-VMIO buffer, B_CACHE is set
2156 * and for a VMIO buffer B_CACHE is adjusted according to the
2157 * backing VM cache.
2158 */
2159 if (bp->b_flags & B_INVAL)
2160 bp->b_flags &= ~B_CACHE;
2161 else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
2162 bp->b_flags |= B_CACHE;
2163 bremfree(bp);
2164
2165 /*
2166 * check for size inconsistencies for non-VMIO case.
2167 */
2168
2169 if (bp->b_bcount != size) {
2170 if ((bp->b_flags & B_VMIO) == 0 ||
2171 (size > bp->b_kvasize)) {
2172 if (bp->b_flags & B_DELWRI) {
2173 bp->b_flags |= B_NOCACHE;
2174 VOP_BWRITE(bp->b_vp, bp);
2175 } else {
2176 if ((bp->b_flags & B_VMIO) &&
2177 (LIST_FIRST(&bp->b_dep) == NULL)) {
2178 bp->b_flags |= B_RELBUF;
2179 brelse(bp);
2180 } else {
2181 bp->b_flags |= B_NOCACHE;
2182 VOP_BWRITE(bp->b_vp, bp);
2183 }
2184 }
2185 goto loop;
2186 }
2187 }
2188
2189 /*
2190 * If the size is inconsistent in the VMIO case, we can resize
2191 * the buffer. This might lead to B_CACHE getting set or
2192 * cleared. If the size has not changed, B_CACHE remains
2193 * unchanged from its previous state.
2194 */
2195
2196 if (bp->b_bcount != size)
2197 allocbuf(bp, size);
2198
2199 KASSERT(bp->b_offset != NOOFFSET,
2200 ("getblk: no buffer offset"));
2201
2202 /*
2203 * A buffer with B_DELWRI set and B_CACHE clear must
2204 * be committed before we can return the buffer in
2205 * order to prevent the caller from issuing a read
2206 * ( due to B_CACHE not being set ) and overwriting
2207 * it.
2208 *
2209 * Most callers, including NFS and FFS, need this to
2210 * operate properly either because they assume they
2211 * can issue a read if B_CACHE is not set, or because
2212 * ( for example ) an uncached B_DELWRI might loop due
2213 * to softupdates re-dirtying the buffer. In the latter
2214 * case, B_CACHE is set after the first write completes,
2215 * preventing further loops.
2216 */
2217
2218 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
2219 VOP_BWRITE(bp->b_vp, bp);
2220 goto loop;
2221 }
2222
2223 splx(s);
2224 bp->b_flags &= ~B_DONE;
2225 } else {
2226 /*
2227 * Buffer is not in-core, create new buffer. The buffer
2228 * returned by getnewbuf() is locked. Note that the returned
2229 * buffer is also considered valid (not marked B_INVAL).
2230 */
2231 int bsize, maxsize, vmio;
2232 off_t offset;
2233
2234 if (vn_isdisk(vp, NULL))
2235 bsize = DEV_BSIZE;
2236 else if (vp->v_mountedhere)
2237 bsize = vp->v_mountedhere->mnt_stat.f_iosize;
2238 else if (vp->v_mount)
2239 bsize = vp->v_mount->mnt_stat.f_iosize;
2240 else
2241 bsize = size;
2242
2243 offset = (off_t)blkno * bsize;
2244 vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
2245 maxsize = vmio ? size + (offset & PAGE_MASK) : size;
2246 maxsize = imax(maxsize, bsize);
2247
2248 if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
2249 if (slpflag || slptimeo) {
2250 splx(s);
2251 return NULL;
2252 }
2253 goto loop;
2254 }
2255
2256 /*
2257 * This code is used to make sure that a buffer is not
2258 * created while the getnewbuf routine is blocked.
2259 * This can be a problem whether the vnode is locked or not.
2260 * If the buffer is created out from under us, we have to
2261 * throw away the one we just created. There is no window
2262 * race because we are safely running at splbio() from the
2263 * point of the duplicate buffer creation through to here,
2264 * and we've locked the buffer.
2265 */
2266 if (gbincore(vp, blkno)) {
2267 bp->b_flags |= B_INVAL;
2268 brelse(bp);
2269 goto loop;
2270 }
2271
2272 /*
2273 * Insert the buffer into the hash, so that it can
2274 * be found by incore.
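 *
 * For reference, the caller-side B_CACHE protocol described in the
 * comment at the top of this function typically looks like the read
 * path in bread(), roughly (error handling and run-time accounting
 * elided):
 *
 *	bp = getblk(vp, blkno, size, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_flags |= B_READ;
 *		bp->b_flags &= ~(B_ERROR | B_INVAL);
 *		vfs_busy_pages(bp, 0);
 *		VOP_STRATEGY(vp, bp);
 *		error = biowait(bp);
 *	}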
2275 */ 2276 bp->b_blkno = bp->b_lblkno = blkno; 2277 bp->b_offset = offset; 2278 2279 bgetvp(vp, bp); 2280 LIST_REMOVE(bp, b_hash); 2281 bh = bufhash(vp, blkno); 2282 LIST_INSERT_HEAD(bh, bp, b_hash); 2283 2284 /* 2285 * set B_VMIO bit. allocbuf() the buffer bigger. Since the 2286 * buffer size starts out as 0, B_CACHE will be set by 2287 * allocbuf() for the VMIO case prior to it testing the 2288 * backing store for validity. 2289 */ 2290 2291 if (vmio) { 2292 bp->b_flags |= B_VMIO; 2293 #if defined(VFS_BIO_DEBUG) 2294 if (vp->v_type != VREG && vp->v_type != VBLK) 2295 printf("getblk: vmioing file type %d???\n", vp->v_type); 2296 #endif 2297 } else { 2298 bp->b_flags &= ~B_VMIO; 2299 } 2300 2301 allocbuf(bp, size); 2302 2303 splx(s); 2304 bp->b_flags &= ~B_DONE; 2305 } 2306 return (bp); 2307 } 2308 2309 /* 2310 * Get an empty, disassociated buffer of given size. The buffer is initially 2311 * set to B_INVAL. 2312 */ 2313 struct buf * 2314 geteblk(int size) 2315 { 2316 struct buf *bp; 2317 int s; 2318 2319 s = splbio(); 2320 while ((bp = getnewbuf(0, 0, size, MAXBSIZE)) == 0); 2321 splx(s); 2322 allocbuf(bp, size); 2323 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 2324 return (bp); 2325 } 2326 2327 2328 /* 2329 * This code constitutes the buffer memory from either anonymous system 2330 * memory (in the case of non-VMIO operations) or from an associated 2331 * VM object (in the case of VMIO operations). This code is able to 2332 * resize a buffer up or down. 2333 * 2334 * Note that this code is tricky, and has many complications to resolve 2335 * deadlock or inconsistant data situations. Tread lightly!!! 2336 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 2337 * the caller. Calling this code willy nilly can result in the loss of data. 2338 * 2339 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 2340 * B_CACHE for the non-VMIO case. 2341 */ 2342 2343 int 2344 allocbuf(struct buf *bp, int size) 2345 { 2346 int newbsize, mbsize; 2347 int i; 2348 2349 #if !defined(MAX_PERF) 2350 if (BUF_REFCNT(bp) == 0) 2351 panic("allocbuf: buffer not busy"); 2352 2353 if (bp->b_kvasize < size) 2354 panic("allocbuf: buffer too small"); 2355 #endif 2356 2357 if ((bp->b_flags & B_VMIO) == 0) { 2358 caddr_t origbuf; 2359 int origbufsize; 2360 /* 2361 * Just get anonymous memory from the kernel. Don't 2362 * mess with B_CACHE. 2363 */ 2364 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2365 #if !defined(NO_B_MALLOC) 2366 if (bp->b_flags & B_MALLOC) 2367 newbsize = mbsize; 2368 else 2369 #endif 2370 newbsize = round_page(size); 2371 2372 if (newbsize < bp->b_bufsize) { 2373 #if !defined(NO_B_MALLOC) 2374 /* 2375 * malloced buffers are not shrunk 2376 */ 2377 if (bp->b_flags & B_MALLOC) { 2378 if (newbsize) { 2379 bp->b_bcount = size; 2380 } else { 2381 free(bp->b_data, M_BIOBUF); 2382 bufspace -= bp->b_bufsize; 2383 bufmallocspace -= bp->b_bufsize; 2384 runningbufspace -= bp->b_bufsize; 2385 if (bp->b_bufsize) 2386 bufspacewakeup(); 2387 bp->b_data = bp->b_kvabase; 2388 bp->b_bufsize = 0; 2389 bp->b_bcount = 0; 2390 bp->b_flags &= ~B_MALLOC; 2391 } 2392 return 1; 2393 } 2394 #endif 2395 vm_hold_free_pages( 2396 bp, 2397 (vm_offset_t) bp->b_data + newbsize, 2398 (vm_offset_t) bp->b_data + bp->b_bufsize); 2399 } else if (newbsize > bp->b_bufsize) { 2400 #if !defined(NO_B_MALLOC) 2401 /* 2402 * We only use malloced memory on the first allocation. 2403 * and revert to page-allocated memory when the buffer 2404 * grows. 
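 *
 * Malloced backing is only used when bufmallocspace is still below
 * maxbufmallocspace, the buffer currently has no backing at all
 * (b_bufsize == 0) and the rounded request fits within half a page;
 * anything larger, and any subsequent growth, is backed by wired
 * pages via vm_hold_load_pages().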
2405 */ 2406 if ( (bufmallocspace < maxbufmallocspace) && 2407 (bp->b_bufsize == 0) && 2408 (mbsize <= PAGE_SIZE/2)) { 2409 2410 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 2411 bp->b_bufsize = mbsize; 2412 bp->b_bcount = size; 2413 bp->b_flags |= B_MALLOC; 2414 bufspace += mbsize; 2415 bufmallocspace += mbsize; 2416 runningbufspace += bp->b_bufsize; 2417 return 1; 2418 } 2419 #endif 2420 origbuf = NULL; 2421 origbufsize = 0; 2422 #if !defined(NO_B_MALLOC) 2423 /* 2424 * If the buffer is growing on its other-than-first allocation, 2425 * then we revert to the page-allocation scheme. 2426 */ 2427 if (bp->b_flags & B_MALLOC) { 2428 origbuf = bp->b_data; 2429 origbufsize = bp->b_bufsize; 2430 bp->b_data = bp->b_kvabase; 2431 bufspace -= bp->b_bufsize; 2432 bufmallocspace -= bp->b_bufsize; 2433 runningbufspace -= bp->b_bufsize; 2434 if (bp->b_bufsize) 2435 bufspacewakeup(); 2436 bp->b_bufsize = 0; 2437 bp->b_flags &= ~B_MALLOC; 2438 newbsize = round_page(newbsize); 2439 } 2440 #endif 2441 vm_hold_load_pages( 2442 bp, 2443 (vm_offset_t) bp->b_data + bp->b_bufsize, 2444 (vm_offset_t) bp->b_data + newbsize); 2445 #if !defined(NO_B_MALLOC) 2446 if (origbuf) { 2447 bcopy(origbuf, bp->b_data, origbufsize); 2448 free(origbuf, M_BIOBUF); 2449 } 2450 #endif 2451 } 2452 } else { 2453 vm_page_t m; 2454 int desiredpages; 2455 2456 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2457 desiredpages = (size == 0) ? 0 : 2458 num_pages((bp->b_offset & PAGE_MASK) + newbsize); 2459 2460 #if !defined(NO_B_MALLOC) 2461 if (bp->b_flags & B_MALLOC) 2462 panic("allocbuf: VMIO buffer can't be malloced"); 2463 #endif 2464 /* 2465 * Set B_CACHE initially if buffer is 0 length or will become 2466 * 0-length. 2467 */ 2468 if (size == 0 || bp->b_bufsize == 0) 2469 bp->b_flags |= B_CACHE; 2470 2471 if (newbsize < bp->b_bufsize) { 2472 /* 2473 * DEV_BSIZE aligned new buffer size is less then the 2474 * DEV_BSIZE aligned existing buffer size. Figure out 2475 * if we have to remove any pages. 2476 */ 2477 if (desiredpages < bp->b_npages) { 2478 for (i = desiredpages; i < bp->b_npages; i++) { 2479 /* 2480 * the page is not freed here -- it 2481 * is the responsibility of 2482 * vnode_pager_setsize 2483 */ 2484 m = bp->b_pages[i]; 2485 KASSERT(m != bogus_page, 2486 ("allocbuf: bogus page found")); 2487 while (vm_page_sleep_busy(m, TRUE, "biodep")) 2488 ; 2489 2490 bp->b_pages[i] = NULL; 2491 vm_page_unwire(m, 0); 2492 } 2493 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + 2494 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); 2495 bp->b_npages = desiredpages; 2496 } 2497 } else if (size > bp->b_bcount) { 2498 /* 2499 * We are growing the buffer, possibly in a 2500 * byte-granular fashion. 2501 */ 2502 struct vnode *vp; 2503 vm_object_t obj; 2504 vm_offset_t toff; 2505 vm_offset_t tinc; 2506 2507 /* 2508 * Step 1, bring in the VM pages from the object, 2509 * allocating them if necessary. We must clear 2510 * B_CACHE if these pages are not valid for the 2511 * range covered by the buffer. 
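 *
 * Pages already resident in the object are wired and reused
 * (sleeping and retrying if a page is busy, and waking the
 * pagedaemon if a page is taken from the cache queue while free
 * memory is low).  A page that has to be freshly allocated cannot
 * contain valid data, so B_CACHE is cleared as soon as one is added.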
2512 */ 2513 2514 vp = bp->b_vp; 2515 obj = vp->v_object; 2516 2517 while (bp->b_npages < desiredpages) { 2518 vm_page_t m; 2519 vm_pindex_t pi; 2520 2521 pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages; 2522 if ((m = vm_page_lookup(obj, pi)) == NULL) { 2523 m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL); 2524 if (m == NULL) { 2525 VM_WAIT; 2526 vm_pageout_deficit += desiredpages - bp->b_npages; 2527 } else { 2528 vm_page_wire(m); 2529 vm_page_wakeup(m); 2530 bp->b_flags &= ~B_CACHE; 2531 bp->b_pages[bp->b_npages] = m; 2532 ++bp->b_npages; 2533 } 2534 continue; 2535 } 2536 2537 /* 2538 * We found a page. If we have to sleep on it, 2539 * retry because it might have gotten freed out 2540 * from under us. 2541 * 2542 * We can only test PG_BUSY here. Blocking on 2543 * m->busy might lead to a deadlock: 2544 * 2545 * vm_fault->getpages->cluster_read->allocbuf 2546 * 2547 */ 2548 2549 if (vm_page_sleep_busy(m, FALSE, "pgtblk")) 2550 continue; 2551 2552 /* 2553 * We have a good page. Should we wakeup the 2554 * page daemon? 2555 */ 2556 if ((curproc != pageproc) && 2557 ((m->queue - m->pc) == PQ_CACHE) && 2558 ((cnt.v_free_count + cnt.v_cache_count) < 2559 (cnt.v_free_min + cnt.v_cache_min))) { 2560 pagedaemon_wakeup(); 2561 } 2562 vm_page_flag_clear(m, PG_ZERO); 2563 vm_page_wire(m); 2564 bp->b_pages[bp->b_npages] = m; 2565 ++bp->b_npages; 2566 } 2567 2568 /* 2569 * Step 2. We've loaded the pages into the buffer, 2570 * we have to figure out if we can still have B_CACHE 2571 * set. Note that B_CACHE is set according to the 2572 * byte-granular range ( bcount and size ), new the 2573 * aligned range ( newbsize ). 2574 * 2575 * The VM test is against m->valid, which is DEV_BSIZE 2576 * aligned. Needless to say, the validity of the data 2577 * needs to also be DEV_BSIZE aligned. Note that this 2578 * fails with NFS if the server or some other client 2579 * extends the file's EOF. If our buffer is resized, 2580 * B_CACHE may remain set! XXX 2581 */ 2582 2583 toff = bp->b_bcount; 2584 tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK); 2585 2586 while ((bp->b_flags & B_CACHE) && toff < size) { 2587 vm_pindex_t pi; 2588 2589 if (tinc > (size - toff)) 2590 tinc = size - toff; 2591 2592 pi = ((bp->b_offset & PAGE_MASK) + toff) >> 2593 PAGE_SHIFT; 2594 2595 vfs_buf_test_cache( 2596 bp, 2597 bp->b_offset, 2598 toff, 2599 tinc, 2600 bp->b_pages[pi] 2601 ); 2602 toff += tinc; 2603 tinc = PAGE_SIZE; 2604 } 2605 2606 /* 2607 * Step 3, fixup the KVM pmap. Remember that 2608 * bp->b_data is relative to bp->b_offset, but 2609 * bp->b_offset may be offset into the first page. 2610 */ 2611 2612 bp->b_data = (caddr_t) 2613 trunc_page((vm_offset_t)bp->b_data); 2614 pmap_qenter( 2615 (vm_offset_t)bp->b_data, 2616 bp->b_pages, 2617 bp->b_npages 2618 ); 2619 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 2620 (vm_offset_t)(bp->b_offset & PAGE_MASK)); 2621 } 2622 } 2623 if (bp->b_flags & B_VMIO) 2624 vmiospace += (newbsize - bp->b_bufsize); 2625 bufspace += (newbsize - bp->b_bufsize); 2626 runningbufspace += (newbsize - bp->b_bufsize); 2627 if (newbsize < bp->b_bufsize) 2628 bufspacewakeup(); 2629 bp->b_bufsize = newbsize; /* actual buffer allocation */ 2630 bp->b_bcount = size; /* requested buffer size */ 2631 return 1; 2632 } 2633 2634 /* 2635 * biowait: 2636 * 2637 * Wait for buffer I/O completion, returning error status. The buffer 2638 * is left locked and B_DONE on return. B_EINTR is converted into a EINTR 2639 * error and cleared. 
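 *
 * The wait itself is a simple loop: sleep on the buffer address at
 * PRIBIO (normally with wmesg "biord" or "biowr", depending on
 * B_READ) until biodone() sets B_DONE, then map B_EINTR to EINTR and
 * B_ERROR to b_error (or EIO if b_error is 0).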
2640 */ 2641 int 2642 biowait(register struct buf * bp) 2643 { 2644 int s; 2645 2646 s = splbio(); 2647 while ((bp->b_flags & B_DONE) == 0) { 2648 #if defined(NO_SCHEDULE_MODS) 2649 tsleep(bp, PRIBIO, "biowait", 0); 2650 #else 2651 if (bp->b_flags & B_READ) 2652 tsleep(bp, PRIBIO, "biord", 0); 2653 else 2654 tsleep(bp, PRIBIO, "biowr", 0); 2655 #endif 2656 } 2657 splx(s); 2658 if (bp->b_flags & B_EINTR) { 2659 bp->b_flags &= ~B_EINTR; 2660 return (EINTR); 2661 } 2662 if (bp->b_flags & B_ERROR) { 2663 return (bp->b_error ? bp->b_error : EIO); 2664 } else { 2665 return (0); 2666 } 2667 } 2668 2669 /* 2670 * biodone: 2671 * 2672 * Finish I/O on a buffer, optionally calling a completion function. 2673 * This is usually called from an interrupt so process blocking is 2674 * not allowed. 2675 * 2676 * biodone is also responsible for setting B_CACHE in a B_VMIO bp. 2677 * In a non-VMIO bp, B_CACHE will be set on the next getblk() 2678 * assuming B_INVAL is clear. 2679 * 2680 * For the VMIO case, we set B_CACHE if the op was a read and no 2681 * read error occured, or if the op was a write. B_CACHE is never 2682 * set if the buffer is invalid or otherwise uncacheable. 2683 * 2684 * biodone does not mess with B_INVAL, allowing the I/O routine or the 2685 * initiator to leave B_INVAL set to brelse the buffer out of existance 2686 * in the biodone routine. 2687 */ 2688 void 2689 biodone(register struct buf * bp) 2690 { 2691 int s; 2692 2693 s = splbio(); 2694 2695 KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp))); 2696 KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp)); 2697 2698 bp->b_flags |= B_DONE; 2699 2700 if (bp->b_flags & B_FREEBUF) { 2701 brelse(bp); 2702 splx(s); 2703 return; 2704 } 2705 2706 if ((bp->b_flags & B_READ) == 0) { 2707 vwakeup(bp); 2708 } 2709 2710 /* call optional completion function if requested */ 2711 if (bp->b_flags & B_CALL) { 2712 bp->b_flags &= ~B_CALL; 2713 (*bp->b_iodone) (bp); 2714 splx(s); 2715 return; 2716 } 2717 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete) 2718 (*bioops.io_complete)(bp); 2719 2720 if (bp->b_flags & B_VMIO) { 2721 int i, resid; 2722 vm_ooffset_t foff; 2723 vm_page_t m; 2724 vm_object_t obj; 2725 int iosize; 2726 struct vnode *vp = bp->b_vp; 2727 2728 obj = vp->v_object; 2729 2730 #if defined(VFS_BIO_DEBUG) 2731 if (vp->v_usecount == 0) { 2732 panic("biodone: zero vnode ref count"); 2733 } 2734 2735 if (vp->v_object == NULL) { 2736 panic("biodone: missing VM object"); 2737 } 2738 2739 if ((vp->v_flag & VOBJBUF) == 0) { 2740 panic("biodone: vnode is not setup for merged cache"); 2741 } 2742 #endif 2743 2744 foff = bp->b_offset; 2745 KASSERT(bp->b_offset != NOOFFSET, 2746 ("biodone: no buffer offset")); 2747 2748 #if !defined(MAX_PERF) 2749 if (!obj) { 2750 panic("biodone: no object"); 2751 } 2752 #endif 2753 #if defined(VFS_BIO_DEBUG) 2754 if (obj->paging_in_progress < bp->b_npages) { 2755 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 2756 obj->paging_in_progress, bp->b_npages); 2757 } 2758 #endif 2759 2760 /* 2761 * Set B_CACHE if the op was a normal read and no error 2762 * occured. B_CACHE is set for writes in the b*write() 2763 * routines. 
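 *
 * "Normal read" here means B_READ is set and none of B_FREEBUF,
 * B_INVAL, B_NOCACHE or B_ERROR are.  The per-page loop below then
 * undoes any bogus_page substitution made by vfs_busy_pages(), marks
 * the portion of each page covered by the read valid via
 * vfs_page_set_valid(), and drops the object's paging-in-progress
 * count.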
2764 */ 2765 iosize = bp->b_bcount - bp->b_resid; 2766 if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) { 2767 bp->b_flags |= B_CACHE; 2768 } 2769 2770 for (i = 0; i < bp->b_npages; i++) { 2771 int bogusflag = 0; 2772 m = bp->b_pages[i]; 2773 if (m == bogus_page) { 2774 bogusflag = 1; 2775 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 2776 if (!m) { 2777 #if defined(VFS_BIO_DEBUG) 2778 printf("biodone: page disappeared\n"); 2779 #endif 2780 vm_object_pip_subtract(obj, 1); 2781 bp->b_flags &= ~B_CACHE; 2782 continue; 2783 } 2784 bp->b_pages[i] = m; 2785 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2786 } 2787 #if defined(VFS_BIO_DEBUG) 2788 if (OFF_TO_IDX(foff) != m->pindex) { 2789 printf( 2790 "biodone: foff(%lu)/m->pindex(%d) mismatch\n", 2791 (unsigned long)foff, m->pindex); 2792 } 2793 #endif 2794 resid = IDX_TO_OFF(m->pindex + 1) - foff; 2795 if (resid > iosize) 2796 resid = iosize; 2797 2798 /* 2799 * In the write case, the valid and clean bits are 2800 * already changed correctly ( see bdwrite() ), so we 2801 * only need to do this here in the read case. 2802 */ 2803 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) { 2804 vfs_page_set_valid(bp, foff, i, m); 2805 } 2806 vm_page_flag_clear(m, PG_ZERO); 2807 2808 /* 2809 * when debugging new filesystems or buffer I/O methods, this 2810 * is the most common error that pops up. if you see this, you 2811 * have not set the page busy flag correctly!!! 2812 */ 2813 if (m->busy == 0) { 2814 #if !defined(MAX_PERF) 2815 printf("biodone: page busy < 0, " 2816 "pindex: %d, foff: 0x(%x,%x), " 2817 "resid: %d, index: %d\n", 2818 (int) m->pindex, (int)(foff >> 32), 2819 (int) foff & 0xffffffff, resid, i); 2820 #endif 2821 if (!vn_isdisk(vp, NULL)) 2822 #if !defined(MAX_PERF) 2823 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n", 2824 bp->b_vp->v_mount->mnt_stat.f_iosize, 2825 (int) bp->b_lblkno, 2826 bp->b_flags, bp->b_npages); 2827 else 2828 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n", 2829 (int) bp->b_lblkno, 2830 bp->b_flags, bp->b_npages); 2831 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n", 2832 m->valid, m->dirty, m->wire_count); 2833 #endif 2834 panic("biodone: page busy < 0\n"); 2835 } 2836 vm_page_io_finish(m); 2837 vm_object_pip_subtract(obj, 1); 2838 foff += resid; 2839 iosize -= resid; 2840 } 2841 if (obj) 2842 vm_object_pip_wakeupn(obj, 0); 2843 } 2844 /* 2845 * For asynchronous completions, release the buffer now. The brelse 2846 * will do a wakeup there if necessary - so no need to do a wakeup 2847 * here in the async case. The sync case always needs to do a wakeup. 2848 */ 2849 2850 if (bp->b_flags & B_ASYNC) { 2851 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 2852 brelse(bp); 2853 else 2854 bqrelse(bp); 2855 } else { 2856 wakeup(bp); 2857 } 2858 splx(s); 2859 } 2860 2861 /* 2862 * This routine is called in lieu of iodone in the case of 2863 * incomplete I/O. This keeps the busy status for pages 2864 * consistant. 
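 *
 * In effect it undoes vfs_busy_pages(): any bogus_page entries are
 * replaced by the real pages (and the buffer's KVA remapped), each
 * page's I/O busy count is dropped with vm_page_io_finish(), and the
 * object's paging-in-progress count is released.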
2865 */ 2866 void 2867 vfs_unbusy_pages(struct buf * bp) 2868 { 2869 int i; 2870 2871 if (bp->b_flags & B_VMIO) { 2872 struct vnode *vp = bp->b_vp; 2873 vm_object_t obj = vp->v_object; 2874 2875 for (i = 0; i < bp->b_npages; i++) { 2876 vm_page_t m = bp->b_pages[i]; 2877 2878 if (m == bogus_page) { 2879 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 2880 #if !defined(MAX_PERF) 2881 if (!m) { 2882 panic("vfs_unbusy_pages: page missing\n"); 2883 } 2884 #endif 2885 bp->b_pages[i] = m; 2886 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2887 } 2888 vm_object_pip_subtract(obj, 1); 2889 vm_page_flag_clear(m, PG_ZERO); 2890 vm_page_io_finish(m); 2891 } 2892 vm_object_pip_wakeupn(obj, 0); 2893 } 2894 } 2895 2896 /* 2897 * vfs_page_set_valid: 2898 * 2899 * Set the valid bits in a page based on the supplied offset. The 2900 * range is restricted to the buffer's size. 2901 * 2902 * This routine is typically called after a read completes. 2903 */ 2904 static void 2905 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m) 2906 { 2907 vm_ooffset_t soff, eoff; 2908 2909 /* 2910 * Start and end offsets in buffer. eoff - soff may not cross a 2911 * page boundry or cross the end of the buffer. The end of the 2912 * buffer, in this case, is our file EOF, not the allocation size 2913 * of the buffer. 2914 */ 2915 soff = off; 2916 eoff = (off + PAGE_SIZE) & ~PAGE_MASK; 2917 if (eoff > bp->b_offset + bp->b_bcount) 2918 eoff = bp->b_offset + bp->b_bcount; 2919 2920 /* 2921 * Set valid range. This is typically the entire buffer and thus the 2922 * entire page. 2923 */ 2924 if (eoff > soff) { 2925 vm_page_set_validclean( 2926 m, 2927 (vm_offset_t) (soff & PAGE_MASK), 2928 (vm_offset_t) (eoff - soff) 2929 ); 2930 } 2931 } 2932 2933 /* 2934 * This routine is called before a device strategy routine. 2935 * It is used to tell the VM system that paging I/O is in 2936 * progress, and treat the pages associated with the buffer 2937 * almost as being PG_BUSY. Also the object paging_in_progress 2938 * flag is handled to make sure that the object doesn't become 2939 * inconsistant. 2940 * 2941 * Since I/O has not been initiated yet, certain buffer flags 2942 * such as B_ERROR or B_INVAL may be in an inconsistant state 2943 * and should be ignored. 2944 */ 2945 void 2946 vfs_busy_pages(struct buf * bp, int clear_modify) 2947 { 2948 int i, bogus; 2949 2950 if (bp->b_flags & B_VMIO) { 2951 struct vnode *vp = bp->b_vp; 2952 vm_object_t obj = vp->v_object; 2953 vm_ooffset_t foff; 2954 2955 foff = bp->b_offset; 2956 KASSERT(bp->b_offset != NOOFFSET, 2957 ("vfs_busy_pages: no buffer offset")); 2958 vfs_setdirty(bp); 2959 2960 retry: 2961 for (i = 0; i < bp->b_npages; i++) { 2962 vm_page_t m = bp->b_pages[i]; 2963 if (vm_page_sleep_busy(m, FALSE, "vbpage")) 2964 goto retry; 2965 } 2966 2967 bogus = 0; 2968 for (i = 0; i < bp->b_npages; i++) { 2969 vm_page_t m = bp->b_pages[i]; 2970 2971 vm_page_flag_clear(m, PG_ZERO); 2972 if ((bp->b_flags & B_CLUSTER) == 0) { 2973 vm_object_pip_add(obj, 1); 2974 vm_page_io_start(m); 2975 } 2976 2977 /* 2978 * When readying a buffer for a read ( i.e 2979 * clear_modify == 0 ), it is important to do 2980 * bogus_page replacement for valid pages in 2981 * partially instantiated buffers. Partially 2982 * instantiated buffers can, in turn, occur when 2983 * reconstituting a buffer from its VM backing store 2984 * base. We only have to do this if B_CACHE is 2985 * clear ( which causes the I/O to occur in the 2986 * first place ). 
The replacement prevents the read 2987 * I/O from overwriting potentially dirty VM-backed 2988 * pages. XXX bogus page replacement is, uh, bogus. 2989 * It may not work properly with small-block devices. 2990 * We need to find a better way. 2991 */ 2992 2993 vm_page_protect(m, VM_PROT_NONE); 2994 if (clear_modify) 2995 vfs_page_set_valid(bp, foff, i, m); 2996 else if (m->valid == VM_PAGE_BITS_ALL && 2997 (bp->b_flags & B_CACHE) == 0) { 2998 bp->b_pages[i] = bogus_page; 2999 bogus++; 3000 } 3001 foff = (foff + PAGE_SIZE) & ~PAGE_MASK; 3002 } 3003 if (bogus) 3004 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 3005 } 3006 } 3007 3008 /* 3009 * Tell the VM system that the pages associated with this buffer 3010 * are clean. This is used for delayed writes where the data is 3011 * going to go to disk eventually without additional VM intevention. 3012 * 3013 * Note that while we only really need to clean through to b_bcount, we 3014 * just go ahead and clean through to b_bufsize. 3015 */ 3016 static void 3017 vfs_clean_pages(struct buf * bp) 3018 { 3019 int i; 3020 3021 if (bp->b_flags & B_VMIO) { 3022 vm_ooffset_t foff; 3023 3024 foff = bp->b_offset; 3025 KASSERT(bp->b_offset != NOOFFSET, 3026 ("vfs_clean_pages: no buffer offset")); 3027 for (i = 0; i < bp->b_npages; i++) { 3028 vm_page_t m = bp->b_pages[i]; 3029 vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK; 3030 vm_ooffset_t eoff = noff; 3031 3032 if (eoff > bp->b_offset + bp->b_bufsize) 3033 eoff = bp->b_offset + bp->b_bufsize; 3034 vfs_page_set_valid(bp, foff, i, m); 3035 /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */ 3036 foff = noff; 3037 } 3038 } 3039 } 3040 3041 /* 3042 * vfs_bio_set_validclean: 3043 * 3044 * Set the range within the buffer to valid and clean. The range is 3045 * relative to the beginning of the buffer, b_offset. Note that b_offset 3046 * itself may be offset from the beginning of the first page. 3047 */ 3048 3049 void 3050 vfs_bio_set_validclean(struct buf *bp, int base, int size) 3051 { 3052 if (bp->b_flags & B_VMIO) { 3053 int i; 3054 int n; 3055 3056 /* 3057 * Fixup base to be relative to beginning of first page. 3058 * Set initial n to be the maximum number of bytes in the 3059 * first page that can be validated. 3060 */ 3061 3062 base += (bp->b_offset & PAGE_MASK); 3063 n = PAGE_SIZE - (base & PAGE_MASK); 3064 3065 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) { 3066 vm_page_t m = bp->b_pages[i]; 3067 3068 if (n > size) 3069 n = size; 3070 3071 vm_page_set_validclean(m, base & PAGE_MASK, n); 3072 base += n; 3073 size -= n; 3074 n = PAGE_SIZE; 3075 } 3076 } 3077 } 3078 3079 /* 3080 * vfs_bio_clrbuf: 3081 * 3082 * clear a buffer. This routine essentially fakes an I/O, so we need 3083 * to clear B_ERROR and B_INVAL. 3084 * 3085 * Note that while we only theoretically need to clear through b_bcount, 3086 * we go ahead and clear through b_bufsize. 
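 *
 * The buffer is zeroed in DEV_BSIZE chunks, consulting the valid
 * bits of each backing page so that chunks which are already valid
 * (or whose page is pre-zeroed, PG_ZERO) are not zeroed again; the
 * matching valid bits are then set and b_resid cleared.  Buffers
 * which are not pure VMIO simply fall through to clrbuf().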
3087 */
3088
3089 void
3090 vfs_bio_clrbuf(struct buf *bp) {
3091 int i, mask = 0;
3092 caddr_t sa, ea;
3093 if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
3094 bp->b_flags &= ~(B_INVAL|B_ERROR);
3095 if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
3096 (bp->b_offset & PAGE_MASK) == 0) {
3097 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
3098 if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
3099 ((bp->b_pages[0]->valid & mask) != mask)) {
3100 bzero(bp->b_data, bp->b_bufsize);
3101 }
3102 bp->b_pages[0]->valid |= mask;
3103 bp->b_resid = 0;
3104 return;
3105 }
3106 ea = sa = bp->b_data;
3107 for(i=0;i<bp->b_npages;i++,sa=ea) {
3108 int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
3109 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
3110 ea = (caddr_t)(vm_offset_t)ulmin(
3111 (u_long)(vm_offset_t)ea,
3112 (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
3113 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
3114 if ((bp->b_pages[i]->valid & mask) == mask)
3115 continue;
3116 if ((bp->b_pages[i]->valid & mask) == 0) {
3117 if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
3118 bzero(sa, ea - sa);
3119 }
3120 } else {
3121 for (; sa < ea; sa += DEV_BSIZE, j++) {
3122 if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
3123 (bp->b_pages[i]->valid & (1<<j)) == 0)
3124 bzero(sa, DEV_BSIZE);
3125 }
3126 }
3127 bp->b_pages[i]->valid |= mask;
3128 vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
3129 }
3130 bp->b_resid = 0;
3131 } else {
3132 clrbuf(bp);
3133 }
3134 }
3135
3136 /*
3137 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
3138 * a buffer's address space. The pages are anonymous and are
3139 * not associated with a file object.
3140 */
3141 void
3142 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
3143 {
3144 vm_offset_t pg;
3145 vm_page_t p;
3146 int index;
3147
3148 to = round_page(to);
3149 from = round_page(from);
3150 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3151
3152 for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3153
3154 tryagain:
3155
3156 p = vm_page_alloc(kernel_object,
3157 ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
3158 VM_ALLOC_NORMAL);
3159 if (!p) {
3160 vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
3161 VM_WAIT;
3162 goto tryagain;
3163 }
3164 vm_page_wire(p);
3165 p->valid = VM_PAGE_BITS_ALL;
3166 vm_page_flag_clear(p, PG_ZERO);
3167 pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
3168 bp->b_pages[index] = p;
3169 vm_page_wakeup(p);
3170 }
3171 bp->b_npages = index;
3172 }
3173
3174 void
3175 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
3176 {
3177 vm_offset_t pg;
3178 vm_page_t p;
3179 int index, newnpages;
3180
3181 from = round_page(from);
3182 to = round_page(to);
3183 newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3184
3185 for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3186 p = bp->b_pages[index];
3187 if (p && (index < bp->b_npages)) {
3188 #if !defined(MAX_PERF)
3189 if (p->busy) {
3190 printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
3191 bp->b_blkno, bp->b_lblkno);
3192 }
3193 #endif
3194 bp->b_pages[index] = NULL;
3195 pmap_kremove(pg);
3196 vm_page_busy(p);
3197 vm_page_unwire(p, 0);
3198 vm_page_free(p);
3199 }
3200 }
3201 bp->b_npages = newnpages;
3202 }
3203
3204
3205 #include "opt_ddb.h"
3206 #ifdef DDB
3207 #include <ddb/ddb.h>
3208
3209 DB_SHOW_COMMAND(buffer, db_show_buffer)
3210 {
3211 /* get args */
3212 struct buf *bp = (struct buf *)addr;
3213
3214 if (!have_addr) {
3215 db_printf("usage: show buffer 
<addr>\n"); 3216 return; 3217 } 3218 3219 db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS); 3220 db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, " 3221 "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, " 3222 "b_blkno = %d, b_pblkno = %d\n", 3223 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 3224 major(bp->b_dev), minor(bp->b_dev), 3225 bp->b_data, bp->b_blkno, bp->b_pblkno); 3226 if (bp->b_npages) { 3227 int i; 3228 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages); 3229 for (i = 0; i < bp->b_npages; i++) { 3230 vm_page_t m; 3231 m = bp->b_pages[i]; 3232 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 3233 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 3234 if ((i + 1) < bp->b_npages) 3235 db_printf(","); 3236 } 3237 db_printf("\n"); 3238 } 3239 } 3240 #endif /* DDB */ 3241