/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_BUF_H_
#define	_SYS_BUF_H_

#include <sys/_exterr.h>
#include <sys/bufobj.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <vm/uma.h>

struct bio;
struct buf;
struct bufobj;
struct mount;
struct vnode;
struct uio;
/*
 * To avoid including <ufs/ffs/softdep.h>
 */
LIST_HEAD(workhead, worklist);
/*
 * These are currently used only by the soft dependency code, hence
 * are stored once in a global variable.  If other subsystems wanted
 * to use these hooks, a pointer to a set of bio_ops could be added
 * to each buffer.
 */
extern struct bio_ops {
	void	(*io_start)(struct buf *);
	void	(*io_complete)(struct buf *);
	void	(*io_deallocate)(struct buf *);
	int	(*io_countdeps)(struct buf *, int);
} bioops;
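
/*
 * A minimal sketch of how a subsystem would install these hooks at
 * initialization time.  The handler names below are hypothetical; the
 * soft dependency code is the one in-tree consumer of bioops.
 *
 *	static void my_io_start(struct buf *);
 *	static void my_io_complete(struct buf *);
 *
 *	void
 *	mysubsys_init(void)
 *	{
 *		bioops.io_start = my_io_start;
 *		bioops.io_complete = my_io_complete;
 *	}
 */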

struct vm_object;
struct vm_page;

typedef uint32_t b_xflags_t;

/*
 * The buffer header describes an I/O operation in the kernel.
 *
 * NOTES:
 *	b_bufsize, b_bcount.  b_bufsize is the allocation size of the
 *	buffer, either DEV_BSIZE or PAGE_SIZE aligned.  b_bcount is the
 *	originally requested buffer size and can serve as a bounds check
 *	against EOF.  For most, but not all uses, b_bcount == b_bufsize.
 *
 *	b_dirtyoff, b_dirtyend.  Buffers support piecemeal, unaligned
 *	ranges of dirty data that need to be written to backing store.
 *	The range is typically clipped at b_bcount (not b_bufsize).
 *
 *	b_resid.  Number of bytes remaining in I/O.  After an I/O operation
 *	completes, b_resid is usually 0 indicating 100% success.
 *
 *	All fields are protected by the buffer lock except those marked:
 *		V - Protected by owning bufobj lock
 *		Q - Protected by the buf queue lock
 *		D - Protected by a dependency-implementation-specific lock
 */
struct buf {
	struct bufobj *b_bufobj;
	long	b_bcount;
	void	*b_caller1;
	caddr_t	b_data;
	uint16_t b_iocmd;	/* BIO_* bio_cmd from bio.h */
	uint16_t b_ioflags;	/* BIO_* bio_flags from bio.h */
	off_t	b_iooffset;
	long	b_resid;
	void	(*b_iodone)(struct buf *);
	void	(*b_ckhashcalc)(struct buf *);
	uint64_t b_ckhash;	/* B_CKHASH requested check-hash */
	daddr_t	b_blkno;	/* Underlying physical block number. */
	off_t	b_offset;	/* Offset into file. */
	TAILQ_ENTRY(buf) b_bobufs;	/* (V) Buffer's associated vnode. */
	uint32_t b_vflags;	/* (V) BV_* flags */
	uint8_t	b_qindex;	/* (Q) buffer queue index */
	uint8_t	b_domain;	/* (Q) buf domain this resides in */
	uint16_t b_subqueue;	/* (Q) per-cpu q if any */
	uint32_t b_flags;	/* B_* flags. */
	b_xflags_t b_xflags;	/* extra flags */
	struct lock b_lock;	/* Buffer lock */
	long	b_bufsize;	/* Allocated buffer size. */
	int	b_runningbufspace;	/* when I/O is running, pipelining */
	int	b_kvasize;	/* size of kva for buffer */
	int	b_dirtyoff;	/* Offset in buffer of dirty region. */
	int	b_dirtyend;	/* Offset of end of dirty region. */
	caddr_t	b_kvabase;	/* base kva for buffer */
	daddr_t	b_lblkno;	/* Logical block number. */
	struct vnode *b_vp;	/* Device vnode. */
	struct ucred *b_rcred;	/* Read credentials reference. */
	struct ucred *b_wcred;	/* Write credentials reference. */
	union {
		TAILQ_ENTRY(buf) b_freelist;	/* (Q) */
		struct {
			void	(*b_pgiodone)(void *, struct vm_page **,
			    int, int);
			int	b_pgbefore;
			int	b_pgafter;
		};
	};
	union cluster_info {
		TAILQ_HEAD(cluster_list_head, buf) cluster_head;
		TAILQ_ENTRY(buf) cluster_entry;
	} b_cluster;
	int	b_npages;
	struct workhead b_dep;	/* (D) List of filesystem dependencies. */
	void	*b_fsprivate1;
	void	*b_fsprivate2;
	void	*b_fsprivate3;

#if defined(FULL_BUF_TRACKING)
#define	BUF_TRACKING_SIZE	32
#define	BUF_TRACKING_ENTRY(x)	((x) & (BUF_TRACKING_SIZE - 1))
	const char *b_io_tracking[BUF_TRACKING_SIZE];
	uint32_t b_io_tcnt;
#elif defined(BUF_TRACKING)
	const char *b_io_tracking;
#endif
	struct kexterr b_exterr;
	struct vm_page *b_pages[];
};
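
/*
 * Illustrative field values (an example, not additional invariants):
 * a buffer caching one 8K file block in which only bytes 512..4095
 * have been modified might have
 *
 *	b_bcount   == 8192	requested size; bounds check against EOF
 *	b_bufsize  == 8192	allocation size, here equal to b_bcount
 *	b_dirtyoff == 512	first dirty byte
 *	b_dirtyend == 4096	one past the last dirty byte
 *	b_resid    == 0		after a fully successful I/O
 */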

#define	b_object	b_bufobj->bo_object
#define	b_error		b_exterr.error

/*
 * These flags are kept in b_flags.
 *
 * Notes:
 *
 *	B_ASYNC		VOP calls on bp's are usually async whether or not
 *			B_ASYNC is set, but some subsystems, such as NFS, like
 *			to know what is best for the caller so they can
 *			optimize the I/O.
 *
 *	B_PAGING	Indicates that bp is being used by the paging system
 *			and that the bp is not linked into the b_vp's clean/
 *			dirty linked lists or ref counts.  Buffer vp
 *			reassignments are illegal in this case.
 *
 *	B_CACHE		This may only be set if the buffer is entirely valid.
 *			The situation where B_DELWRI is set and B_CACHE is
 *			clear MUST be committed to disk by getblk() so
 *			B_DELWRI can also be cleared.  See the comments for
 *			getblk() in kern/vfs_bio.c.  If B_CACHE is clear,
 *			the caller is expected to clear BIO_ERROR and B_INVAL,
 *			set BIO_READ, and initiate an I/O (a sketch of this
 *			protocol follows the flag definitions below).
 *
 *			The 'entire buffer' is defined to be the range from
 *			0 through b_bcount.
 *
 *	B_MALLOC	Request that the buffer be allocated from the malloc
 *			pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned.
 *
 *	B_CLUSTEROK	This flag is typically set for B_DELWRI buffers
 *			by filesystems that allow clustering when the buffer
 *			is fully dirty and indicates that it may be clustered
 *			with other adjacent dirty buffers.  Note the clustering
 *			may not be used with the stage 1 data write under NFS
 *			but may be used for the commit RPC portion.
 *
 *	B_INVALONERR	This flag is set on dirty buffers.  It specifies that a
 *			write error should forcibly invalidate the buffer
 *			contents.  This flag should be used with caution, as it
 *			discards data.  It is incompatible with B_ASYNC.
 *
 *	B_VMIO		Indicates that the buffer is tied into a VM object.
 *			The buffer's data is always PAGE_SIZE aligned even
 *			if b_bufsize and b_bcount are not.  (b_bufsize is
 *			always at least DEV_BSIZE aligned, though.)
 *
 *	B_DIRECT	Hint that we should attempt to completely free
 *			the pages underlying the buffer.  B_DIRECT is
 *			sticky until the buffer is released and typically
 *			only has an effect when B_RELBUF is also set.
 */

#define	B_AGE		0x00000001	/* Move to age queue when I/O done. */
#define	B_NEEDCOMMIT	0x00000002	/* Append-write in progress. */
#define	B_ASYNC		0x00000004	/* Start I/O, do not wait. */
#define	B_DIRECT	0x00000008	/* direct I/O flag (pls free vmio) */
#define	B_DEFERRED	0x00000010	/* Skipped over for cleaning */
#define	B_CACHE		0x00000020	/* Bread found us in the cache. */
#define	B_VALIDSUSPWRT	0x00000040	/* Valid write during suspension. */
#define	B_DELWRI	0x00000080	/* Delay I/O until buffer reused. */
#define	B_CKHASH	0x00000100	/* checksum hash calculated on read */
#define	B_DONE		0x00000200	/* I/O completed. */
#define	B_EINTR		0x00000400	/* I/O was interrupted */
#define	B_NOREUSE	0x00000800	/* Contents not reused once released. */
#define	B_REUSE		0x00001000	/* Contents reused, second chance. */
#define	B_INVAL		0x00002000	/* Does not contain valid info. */
#define	B_BARRIER	0x00004000	/* Write this and all preceding first. */
#define	B_NOCACHE	0x00008000	/* Do not cache block after use. */
#define	B_MALLOC	0x00010000	/* malloced b_data */
#define	B_CLUSTEROK	0x00020000	/* May be clustered with adjacent bufs. */
#define	B_INVALONERR	0x00040000	/* Invalidate on write error. */
#define	B_IOSTARTED	0x00080000	/* buf_start() called */
#define	B_00100000	0x00100000	/* Available flag. */
#define	B_MAXPHYS	0x00200000	/* nitems(b_pages[]) = atop(MAXPHYS). */
#define	B_RELBUF	0x00400000	/* Release VMIO buffer. */
#define	B_FS_FLAG1	0x00800000	/* Available flag for FS use. */
#define	B_NOCOPY	0x01000000	/* Don't copy-on-write this buf. */
#define	B_INFREECNT	0x02000000	/* buf is counted in numfreebufs */
#define	B_PAGING	0x04000000	/* volatile paging I/O -- bypass VMIO */
#define	B_MANAGED	0x08000000	/* Managed by FS. */
#define	B_RAM		0x10000000	/* Read ahead mark (flag) */
#define	B_VMIO		0x20000000	/* VMIO flag */
#define	B_CLUSTER	0x40000000	/* pagein op, so swap() can count it */
#define	B_REMFREE	0x80000000	/* Delayed bremfree */

#define	PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34managed" \
	"\33paging\32infreecnt\31nocopy\30b23\27relbuf\26maxphys\25b20" \
	"\24iostarted\23invalonerr\22clusterok\21malloc\20nocache\17barrier" \
	"\16inval\15reuse\14noreuse\13eintr\12done\11ckhash\10delwri" \
	"\7validsuspwrt\6cache\5deferred\4direct\3async\2needcommit\1age"

/*
 * These flags are kept in b_xflags.
 *
 * BX_FSPRIV reserves a set of eight flags that may be used by individual
 * filesystems for their own purpose.  Their specific definitions are
 * found in the header files for each filesystem that uses them.
 */
#define	BX_VNDIRTY	0x00000001	/* On vnode dirty list */
#define	BX_VNCLEAN	0x00000002	/* On vnode clean list */
#define	BX_CVTENXIO	0x00000004	/* Convert errors to ENXIO */
#define	BX_BKGRDWRITE	0x00000010	/* Do writes in background */
#define	BX_BKGRDMARKER	0x00000020	/* Mark buffer for splay tree */
#define	BX_ALTDATA	0x00000040	/* Holds extended data */
#define	BX_FSPRIV	0x00FF0000	/* Filesystem-specific flags mask */

#define	PRINT_BUF_XFLAGS "\20\7altdata\6bkgrdmarker\5bkgrdwrite\3cvtenxio" \
	"\2clean\1dirty"

#define	NOOFFSET	(-1LL)		/* No buffer offset calculated yet */

/*
 * These flags are kept in b_vflags.
 */
#define	BV_SCANNED	0x00000001	/* VOP_FSYNC funcs mark written bufs */
#define	BV_BKGRDINPROG	0x00000002	/* Background write in progress */
#define	BV_BKGRDWAIT	0x00000004	/* Background write waiting */
#define	BV_BKGRDERR	0x00000008	/* Error from background write */

#define	PRINT_BUF_VFLAGS "\20\4bkgrderr\3bkgrdwait\2bkgrdinprog\1scanned"

#ifdef _KERNEL

#ifndef NSWBUF_MIN
#define	NSWBUF_MIN	16
#endif

/*
 * Buffer locking
 */
#include <sys/proc.h>			/* XXX for curthread */
#include <sys/mutex.h>

/*
 * Initialize a lock.
 */
#define	BUF_LOCKINIT(bp, wmesg)						\
	lockinit(&(bp)->b_lock, PVFS, wmesg, 0, LK_NEW)
/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 */
#define	BUF_LOCK(bp, locktype, interlock)				\
	_lockmgr_args_rw(&(bp)->b_lock, (locktype), (interlock),	\
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,		\
	    LOCK_FILE, LOCK_LINE)

/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 */
#define	BUF_TIMELOCK(bp, locktype, interlock, wmesg, catch, timo)	\
	_lockmgr_args_rw(&(bp)->b_lock, (locktype) | LK_TIMELOCK,	\
	    (interlock), (wmesg), PVFS | (catch), (timo),		\
	    LOCK_FILE, LOCK_LINE)

/*
 * Release a lock.  Only the acquiring process may free the lock unless
 * it has been handed off to biodone.
 */
#define	BUF_UNLOCK(bp) do {						\
	KASSERT(((bp)->b_flags & B_REMFREE) == 0,			\
	    ("BUF_UNLOCK %p while B_REMFREE is still set.", (bp)));	\
									\
	BUF_UNLOCK_RAW((bp));						\
} while (0)
#define	BUF_UNLOCK_RAW(bp) do {						\
	(void)_lockmgr_args(&(bp)->b_lock, LK_RELEASE, NULL,		\
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,		\
	    LOCK_FILE, LOCK_LINE);					\
} while (0)
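
/*
 * A sketch of typical lock usage (illustrative; note that BUF_UNLOCK()
 * asserts that B_REMFREE has been cleared first, e.g. via bremfree()):
 *
 *	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
 *		return (EBUSY);
 *	... examine or modify the buffer ...
 *	BUF_UNLOCK(bp);
 */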

/*
 * Check if a buffer lock is recursed.
 */
#define	BUF_LOCKRECURSED(bp)						\
	lockmgr_recursed(&(bp)->b_lock)

/*
 * Check if a buffer lock is currently held.
 */
#define	BUF_ISLOCKED(bp)						\
	lockstatus(&(bp)->b_lock)

/*
 * Check if a buffer lock is currently held by LK_KERNPROC.
 */
#define	BUF_DISOWNED(bp)						\
	lockmgr_disowned(&(bp)->b_lock)

/*
 * Free a buffer lock.
 */
#define	BUF_LOCKFREE(bp)						\
	lockdestroy(&(bp)->b_lock)

/*
 * Print information about a buffer lock.
 */
#define	BUF_LOCKPRINTINFO(bp)						\
	lockmgr_printinfo(&(bp)->b_lock)

/*
 * Buffer lock assertions.
 */
#if defined(INVARIANTS) && defined(INVARIANT_SUPPORT)
#define	BUF_ASSERT_LOCKED(bp)						\
	_lockmgr_assert(&(bp)->b_lock, KA_LOCKED, LOCK_FILE, LOCK_LINE)
#define	BUF_ASSERT_SLOCKED(bp)						\
	_lockmgr_assert(&(bp)->b_lock, KA_SLOCKED, LOCK_FILE, LOCK_LINE)
#define	BUF_ASSERT_XLOCKED(bp)						\
	_lockmgr_assert(&(bp)->b_lock, KA_XLOCKED, LOCK_FILE, LOCK_LINE)
#define	BUF_ASSERT_UNLOCKED(bp)						\
	_lockmgr_assert(&(bp)->b_lock, KA_UNLOCKED, LOCK_FILE, LOCK_LINE)
#else
#define	BUF_ASSERT_LOCKED(bp)
#define	BUF_ASSERT_SLOCKED(bp)
#define	BUF_ASSERT_XLOCKED(bp)
#define	BUF_ASSERT_UNLOCKED(bp)
#endif

#ifdef _SYS_PROC_H_	/* Avoid #include <sys/proc.h> pollution */
/*
 * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel.  Once done, the lock may legally be released by biodone.  The
 * original owning process can no longer acquire it recursively, but must
 * wait until the I/O is completed and the lock has been freed by biodone.
 */
#define	BUF_KERNPROC(bp)						\
	_lockmgr_disown(&(bp)->b_lock, LOCK_FILE, LOCK_LINE)
#endif
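
/*
 * A sketch of the asynchronous hand-off this enables, simplified from
 * bufwrite() in kern/vfs_bio.c: once disowned, the lock travels with
 * the I/O and is dropped at completion, not by the initiating thread.
 *
 *	bp->b_flags |= B_ASYNC;
 *	BUF_KERNPROC(bp);
 *	bstrategy(bp);
 *	return (0);	do not BUF_UNLOCK(); bufdone() releases the lock
 */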

#define	BUF_EXTERR_FROM_CURTHR(bp)					\
	((bp)->b_exterr = curthread->td_kexterr)

#define	BUF_EXTERR_TO_CURTHR(bp)					\
	(curthread->td_kexterr = (bp)->b_exterr)

#endif /* _KERNEL */

struct buf_queue_head {
	TAILQ_HEAD(buf_queue, buf) queue;
	daddr_t	last_pblkno;
	struct buf *insert_point;
	struct buf *switch_point;
};

/*
 * This structure describes a clustered I/O.
 */
struct cluster_save {
	long	bs_bcount;		/* Saved b_bcount. */
	long	bs_bufsize;		/* Saved b_bufsize. */
	int	bs_nchildren;		/* Number of associated buffers. */
	struct buf **bs_children;	/* List of associated buffers. */
};

/*
 * Vnode clustering tracker
 */
struct vn_clusterw {
	daddr_t	v_cstart;		/* v start block of cluster */
	daddr_t	v_lasta;		/* v last allocation */
	daddr_t	v_lastw;		/* v last write */
	int	v_clen;			/* v length of cur. cluster */
};

#ifdef _KERNEL

static __inline int
bwrite(struct buf *bp)
{

	KASSERT(bp->b_bufobj != NULL, ("bwrite: no bufobj bp=%p", bp));
	KASSERT(bp->b_bufobj->bo_ops != NULL, ("bwrite: no bo_ops bp=%p", bp));
	KASSERT(bp->b_bufobj->bo_ops->bop_write != NULL,
	    ("bwrite: no bop_write bp=%p", bp));
	return (BO_WRITE(bp->b_bufobj, bp));
}

static __inline void
bstrategy(struct buf *bp)
{

	KASSERT(bp->b_bufobj != NULL, ("bstrategy: no bufobj bp=%p", bp));
	KASSERT(bp->b_bufobj->bo_ops != NULL,
	    ("bstrategy: no bo_ops bp=%p", bp));
	KASSERT(bp->b_bufobj->bo_ops->bop_strategy != NULL,
	    ("bstrategy: no bop_strategy bp=%p", bp));
	BO_STRATEGY(bp->b_bufobj, bp);
}

static __inline void
buf_start(struct buf *bp)
{
	KASSERT((bp->b_flags & B_IOSTARTED) == 0,
	    ("recursed buf_start %p", bp));
	bp->b_flags |= B_IOSTARTED;
	if (bioops.io_start)
		(*bioops.io_start)(bp);
}

static __inline void
buf_complete(struct buf *bp)
{
	if ((bp->b_flags & B_IOSTARTED) != 0) {
		bp->b_flags &= ~B_IOSTARTED;
		if (bioops.io_complete)
			(*bioops.io_complete)(bp);
	}
}

static __inline void
buf_deallocate(struct buf *bp)
{
	if (bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);
}

static __inline int
buf_countdeps(struct buf *bp, int i)
{
	if (bioops.io_countdeps)
		return ((*bioops.io_countdeps)(bp, i));
	else
		return (0);
}

static __inline void
buf_track(struct buf *bp __unused, const char *location __unused)
{

#if defined(FULL_BUF_TRACKING)
	bp->b_io_tracking[BUF_TRACKING_ENTRY(bp->b_io_tcnt++)] = location;
#elif defined(BUF_TRACKING)
	bp->b_io_tracking = location;
#endif
}

#endif /* _KERNEL */

/*
 * Zero out the buffer's data area.
 */
#define	clrbuf(bp) do {							\
	bzero((bp)->b_data, (u_int)(bp)->b_bcount);			\
	(bp)->b_resid = 0;						\
} while (0)

/*
 * Flags for getblk's last parameter.
 */
#define	GB_LOCK_NOWAIT	0x0001		/* Fail if we block on a buf lock. */
#define	GB_NOCREAT	0x0002		/* Don't create a buf if not found. */
#define	GB_NOWAIT_BD	0x0004		/* Do not wait for bufdaemon. */
#define	GB_UNMAPPED	0x0008		/* Do not mmap buffer pages. */
#define	GB_KVAALLOC	0x0010		/* But allocate KVA. */
#define	GB_CKHASH	0x0020		/* If reading, calc checksum hash */
#define	GB_NOSPARSE	0x0040		/* Do not instantiate holes */
#define	GB_CVTENXIO	0x0080		/* Convert errors to ENXIO */
#define	GB_NOWITNESS	0x0100		/* Do not record for WITNESS */
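
/*
 * A sketch of a non-blocking cache probe using these flags: ask
 * getblk() not to instantiate a missing buffer (GB_NOCREAT) and to
 * fail rather than sleep on a contended buffer lock (GB_LOCK_NOWAIT):
 *
 *	bp = getblk(vp, lblkno, size, 0, 0, GB_NOCREAT | GB_LOCK_NOWAIT);
 *	if (bp == NULL)
 *		... block not resident, or its lock was contended ...
 */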

#ifdef _KERNEL
extern int	nbuf;			/* The number of buffer headers */
extern u_long	maxswzone;		/* Max KVA for swap structures */
extern u_long	maxbcache;		/* Max KVA for buffer cache */
extern int	maxbcachebuf;		/* Max buffer cache block size */
extern long	hibufspace;
extern int	dirtybufthresh;
extern int	bdwriteskip;
extern int	dirtybufferflushes;
extern int	altbufferflushes;
extern int	nswbuf;			/* Number of swap I/O buffer headers. */
extern caddr_t __read_mostly unmapped_buf; /* Data address for unmapped
					      buffers. */

static inline int
buf_mapped(struct buf *bp)
{

	return (bp->b_data != unmapped_buf);
}

long	runningbufclaim(struct buf *, int);
void	runningbufwakeup(struct buf *);
void	waitrunningbufspace(void);
caddr_t	kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est);
void	bufinit(void);
void	bufshutdown(int);
void	bdata2bio(struct buf *bp, struct bio *bip);
void	bwillwrite(void);
int	buf_dirty_count_severe(void);
void	bremfree(struct buf *);
void	bremfreef(struct buf *);	/* XXX Force bremfree, only for nfs. */
#define	bread(vp, blkno, size, cred, bpp)				\
	breadn_flags(vp, blkno, blkno, size, NULL, NULL, 0, cred, 0,	\
	    NULL, bpp)
#define	bread_gb(vp, blkno, size, cred, gbflags, bpp)			\
	breadn_flags(vp, blkno, blkno, size, NULL, NULL, 0, cred,	\
	    gbflags, NULL, bpp)
#define	breadn(vp, blkno, size, rablkno, rabsize, cnt, cred, bpp)	\
	breadn_flags(vp, blkno, blkno, size, rablkno, rabsize, cnt, cred, \
	    0, NULL, bpp)
int	breadn_flags(struct vnode *, daddr_t, daddr_t, int, daddr_t *, int *,
	    int, struct ucred *, int, void (*)(struct buf *), struct buf **);
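
/*
 * A sketch of the common read-modify-write cycle built on these
 * primitives.  In this tree breadn_flags() releases the buffer and
 * NULLs *bpp on error, so the caller need not brelse() on failure:
 *
 *	error = bread(vp, lblkno, size, NOCRED, &bp);
 *	if (error != 0)
 *		return (error);
 *	... modify bp->b_data ...
 *	bdwrite(bp);	delayed write; or bwrite(bp) for synchronous
 */
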
void	bdwrite(struct buf *);
void	bawrite(struct buf *);
void	babarrierwrite(struct buf *);
int	bbarrierwrite(struct buf *);
void	bdirty(struct buf *);
void	bundirty(struct buf *);
void	bufstrategy(struct bufobj *, struct buf *);
void	brelse(struct buf *);
void	bqrelse(struct buf *);
int	vfs_bio_awrite(struct buf *);
void	vfs_busy_pages_acquire(struct buf *bp);
void	vfs_busy_pages_release(struct buf *bp);
struct buf *incore(struct bufobj *, daddr_t);
bool	inmem(struct vnode *, daddr_t);
struct buf *gbincore(struct bufobj *, daddr_t);
struct buf *gbincore_unlocked(struct bufobj *, daddr_t);
struct buf *getblk(struct vnode *, daddr_t, int, int, int, int);
int	getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size,
	    int slpflag, int slptimeo, int flags, struct buf **bpp);
struct buf *geteblk(int, int);
int	bufwait(struct buf *);
int	bufwrite(struct buf *);
void	bufdone(struct buf *);
void	bd_speedup(void);

extern uma_zone_t pbuf_zone;
uma_zone_t pbuf_zsecond_create(const char *name, int max);

struct vn_clusterw;

void	cluster_init_vn(struct vn_clusterw *vnc);
int	cluster_read(struct vnode *, u_quad_t, daddr_t, long,
	    struct ucred *, long, int, int, struct buf **);
int	cluster_wbuild(struct vnode *, long, daddr_t, int, int);
void	cluster_write(struct vnode *, struct vn_clusterw *, struct buf *,
	    u_quad_t, int, int);
void	vfs_bio_brelse(struct buf *bp, int ioflags);
void	vfs_bio_bzero_buf(struct buf *bp, int base, int size);
void	vfs_bio_clrbuf(struct buf *);
void	vfs_bio_set_flags(struct buf *bp, int ioflags);
void	vfs_bio_set_valid(struct buf *, int base, int size);
void	vfs_busy_pages(struct buf *, int clear_modify);
void	vfs_unbusy_pages(struct buf *);
int	vmapbuf(struct buf *, void *, size_t, int);
void	vunmapbuf(struct buf *);
void	brelvp(struct buf *);
int	bgetvp(struct vnode *, struct buf *) __result_use_check;
void	pbgetbo(struct bufobj *bo, struct buf *bp);
void	pbgetvp(struct vnode *, struct buf *);
void	pbrelbo(struct buf *);
void	pbrelvp(struct buf *);
int	allocbuf(struct buf *bp, int size);
void	reassignbuf(struct buf *);
void	bwait(struct buf *, u_char, const char *);
void	bdone(struct buf *);

typedef daddr_t (vbg_get_lblkno_t)(struct vnode *, vm_ooffset_t);
typedef int (vbg_get_blksize_t)(struct vnode *, daddr_t, long *);
int	vfs_bio_getpages(struct vnode *vp, struct vm_page **ma, int count,
	    int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
	    vbg_get_blksize_t get_blksize);

#endif /* _KERNEL */

#endif /* !_SYS_BUF_H_ */