xref: /freebsd/sys/geom/geom_io.c (revision ee0fe82ee2892f5ece189db0eab38913aaab5f0f)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2002 Poul-Henning Kamp
5  * Copyright (c) 2002 Networks Associates Technology, Inc.
6  * Copyright (c) 2013 The FreeBSD Foundation
7  * All rights reserved.
8  *
9  * This software was developed for the FreeBSD Project by Poul-Henning Kamp
10  * and NAI Labs, the Security Research Division of Network Associates, Inc.
11  * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
12  * DARPA CHATS research program.
13  *
14  * Portions of this software were developed by Konstantin Belousov
15  * under sponsorship from the FreeBSD Foundation.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. The names of the authors may not be used to endorse or promote
26  *    products derived from this software without specific prior written
27  *    permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  */
41 
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #include <sys/bio.h>
50 #include <sys/ktr.h>
51 #include <sys/proc.h>
52 #include <sys/sbuf.h>
53 #include <sys/stack.h>
54 #include <sys/sysctl.h>
55 #include <sys/vmem.h>
56 #include <machine/stdarg.h>
57 
58 #include <sys/errno.h>
59 #include <geom/geom.h>
60 #include <geom/geom_int.h>
61 #include <sys/devicestat.h>
62 
63 #include <vm/uma.h>
64 #include <vm/vm.h>
65 #include <vm/vm_param.h>
66 #include <vm/vm_kern.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_map.h>
71 
72 static int	g_io_transient_map_bio(struct bio *bp);
73 
74 static struct g_bioq g_bio_run_down;
75 static struct g_bioq g_bio_run_up;
76 
77 /*
78  * Pace is a hint that we've had some trouble recently allocating
79  * bios, so we should back off trying to send I/O down the stack
80  * a bit to let the problem resolve. When pacing, we also turn
81  * off direct dispatch to also reduce memory pressure from I/Os
82  * there, at the expxense of some added latency while the memory
83  * pressures exist. See g_io_schedule_down() for more details
84  * and limitations.
85  */
86 static volatile u_int __read_mostly pace;
87 
88 static uma_zone_t __read_mostly biozone;
89 
90 /*
91  * The head of the list of classifiers used in g_io_request.
92  * Use g_register_classifier() and g_unregister_classifier()
93  * to add/remove entries to the list.
94  * Classifiers are invoked in registration order.
95  */
96 static TAILQ_HEAD(, g_classifier_hook) g_classifier_tailq __read_mostly =
97     TAILQ_HEAD_INITIALIZER(g_classifier_tailq);
98 
99 #include <machine/atomic.h>
100 
101 static void
102 g_bioq_lock(struct g_bioq *bq)
103 {
104 
105 	mtx_lock(&bq->bio_queue_lock);
106 }
107 
108 static void
109 g_bioq_unlock(struct g_bioq *bq)
110 {
111 
112 	mtx_unlock(&bq->bio_queue_lock);
113 }
114 
115 #if 0
116 static void
117 g_bioq_destroy(struct g_bioq *bq)
118 {
119 
120 	mtx_destroy(&bq->bio_queue_lock);
121 }
122 #endif
123 
124 static void
125 g_bioq_init(struct g_bioq *bq)
126 {
127 
128 	TAILQ_INIT(&bq->bio_queue);
129 	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
130 }
131 
132 static struct bio *
133 g_bioq_first(struct g_bioq *bq)
134 {
135 	struct bio *bp;
136 
137 	bp = TAILQ_FIRST(&bq->bio_queue);
138 	if (bp != NULL) {
139 		KASSERT((bp->bio_flags & BIO_ONQUEUE),
140 		    ("Bio not on queue bp=%p target %p", bp, bq));
141 		bp->bio_flags &= ~BIO_ONQUEUE;
142 		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
143 		bq->bio_queue_length--;
144 	}
145 	return (bp);
146 }
147 
148 struct bio *
149 g_new_bio(void)
150 {
151 	struct bio *bp;
152 
153 	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
154 #ifdef KTR
155 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
156 		struct stack st;
157 
158 		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
159 		stack_save(&st);
160 		CTRSTACK(KTR_GEOM, &st, 3);
161 	}
162 #endif
163 	return (bp);
164 }
165 
166 struct bio *
167 g_alloc_bio(void)
168 {
169 	struct bio *bp;
170 
171 	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
172 #ifdef KTR
173 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
174 		struct stack st;
175 
176 		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
177 		stack_save(&st);
178 		CTRSTACK(KTR_GEOM, &st, 3);
179 	}
180 #endif
181 	return (bp);
182 }
183 
184 void
185 g_destroy_bio(struct bio *bp)
186 {
187 #ifdef KTR
188 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
189 		struct stack st;
190 
191 		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
192 		stack_save(&st);
193 		CTRSTACK(KTR_GEOM, &st, 3);
194 	}
195 #endif
196 	uma_zfree(biozone, bp);
197 }
198 
199 struct bio *
200 g_clone_bio(struct bio *bp)
201 {
202 	struct bio *bp2;
203 
204 	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
205 	if (bp2 != NULL) {
206 		bp2->bio_parent = bp;
207 		bp2->bio_cmd = bp->bio_cmd;
208 		/*
209 		 *  BIO_ORDERED flag may be used by disk drivers to enforce
210 		 *  ordering restrictions, so this flag needs to be cloned.
211 		 *  BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
212 		 *  indicate which way the buffer is passed.
213 		 *  Other bio flags are not suitable for cloning.
214 		 */
215 		bp2->bio_flags = bp->bio_flags &
216 		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
217 		bp2->bio_length = bp->bio_length;
218 		bp2->bio_offset = bp->bio_offset;
219 		bp2->bio_data = bp->bio_data;
220 		bp2->bio_ma = bp->bio_ma;
221 		bp2->bio_ma_n = bp->bio_ma_n;
222 		bp2->bio_ma_offset = bp->bio_ma_offset;
223 		bp2->bio_attribute = bp->bio_attribute;
224 		if (bp->bio_cmd == BIO_ZONE)
225 			bcopy(&bp->bio_zone, &bp2->bio_zone,
226 			    sizeof(bp->bio_zone));
227 		/* Inherit classification info from the parent */
228 		bp2->bio_classifier1 = bp->bio_classifier1;
229 		bp2->bio_classifier2 = bp->bio_classifier2;
230 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
231 		bp2->bio_track_bp = bp->bio_track_bp;
232 #endif
233 		bp->bio_children++;
234 	}
235 #ifdef KTR
236 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
237 		struct stack st;
238 
239 		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
240 		stack_save(&st);
241 		CTRSTACK(KTR_GEOM, &st, 3);
242 	}
243 #endif
244 	return(bp2);
245 }
246 
247 struct bio *
248 g_duplicate_bio(struct bio *bp)
249 {
250 	struct bio *bp2;
251 
252 	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
253 	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
254 	bp2->bio_parent = bp;
255 	bp2->bio_cmd = bp->bio_cmd;
256 	bp2->bio_length = bp->bio_length;
257 	bp2->bio_offset = bp->bio_offset;
258 	bp2->bio_data = bp->bio_data;
259 	bp2->bio_ma = bp->bio_ma;
260 	bp2->bio_ma_n = bp->bio_ma_n;
261 	bp2->bio_ma_offset = bp->bio_ma_offset;
262 	bp2->bio_attribute = bp->bio_attribute;
263 	bp->bio_children++;
264 #ifdef KTR
265 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
266 		struct stack st;
267 
268 		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
269 		stack_save(&st);
270 		CTRSTACK(KTR_GEOM, &st, 3);
271 	}
272 #endif
273 	return(bp2);
274 }
275 
276 void
277 g_reset_bio(struct bio *bp)
278 {
279 
280 	bzero(bp, sizeof(*bp));
281 }
282 
283 void
284 g_io_init()
285 {
286 
287 	g_bioq_init(&g_bio_run_down);
288 	g_bioq_init(&g_bio_run_up);
289 	biozone = uma_zcreate("g_bio", sizeof (struct bio),
290 	    NULL, NULL,
291 	    NULL, NULL,
292 	    0, 0);
293 }
294 
295 int
296 g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
297 {
298 	struct bio *bp;
299 	int error;
300 
301 	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
302 	bp = g_alloc_bio();
303 	bp->bio_cmd = BIO_GETATTR;
304 	bp->bio_done = NULL;
305 	bp->bio_attribute = attr;
306 	bp->bio_length = *len;
307 	bp->bio_data = ptr;
308 	g_io_request(bp, cp);
309 	error = biowait(bp, "ggetattr");
310 	*len = bp->bio_completed;
311 	g_destroy_bio(bp);
312 	return (error);
313 }
314 
315 int
316 g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
317 {
318 	struct bio *bp;
319 	int error;
320 
321 	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
322 	bp = g_alloc_bio();
323 	bp->bio_cmd = BIO_ZONE;
324 	bp->bio_done = NULL;
325 	/*
326 	 * XXX KDM need to handle report zone data.
327 	 */
328 	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
329 	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
330 		bp->bio_length =
331 		    zone_args->zone_params.report.entries_allocated *
332 		    sizeof(struct disk_zone_rep_entry);
333 	else
334 		bp->bio_length = 0;
335 
336 	g_io_request(bp, cp);
337 	error = biowait(bp, "gzone");
338 	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
339 	g_destroy_bio(bp);
340 	return (error);
341 }
342 
343 /*
344  * Send a BIO_SPEEDUP down the stack. This is used to tell the lower layers that
345  * the upper layers have detected a resource shortage. The lower layers are
346  * advised to stop delaying I/O that they might be holding for performance
347  * reasons and to schedule it (non-trims) or complete it successfully (trims) as
348  * quickly as it can. bio_length is the amount of the shortage.  This call
349  * should be non-blocking. bio_resid is used to communicate back if the lower
350  * layers couldn't find bio_length worth of I/O to schedule or discard. A length
351  * of 0 means to do as much as you can (schedule the h/w queues full, discard
352  * all trims). flags are a hint from the upper layers to the lower layers what
353  * operation should be done.
354  */
355 int
356 g_io_speedup(size_t shortage, u_int flags, size_t *resid, struct g_consumer *cp)
357 {
358 	struct bio *bp;
359 	int error;
360 
361 	KASSERT((flags & (BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE)) != 0,
362 	    ("Invalid flags passed to g_io_speedup: %#x", flags));
363 	g_trace(G_T_BIO, "bio_speedup(%s, %zu, %#x)", cp->provider->name,
364 	    shortage, flags);
365 	bp = g_new_bio();
366 	if (bp == NULL)
367 		return (ENOMEM);
368 	bp->bio_cmd = BIO_SPEEDUP;
369 	bp->bio_length = shortage;
370 	bp->bio_done = NULL;
371 	bp->bio_flags |= flags;
372 	g_io_request(bp, cp);
373 	error = biowait(bp, "gflush");
374 	*resid = bp->bio_resid;
375 	g_destroy_bio(bp);
376 	return (error);
377 }
378 
379 int
380 g_io_flush(struct g_consumer *cp)
381 {
382 	struct bio *bp;
383 	int error;
384 
385 	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
386 	bp = g_alloc_bio();
387 	bp->bio_cmd = BIO_FLUSH;
388 	bp->bio_flags |= BIO_ORDERED;
389 	bp->bio_done = NULL;
390 	bp->bio_attribute = NULL;
391 	bp->bio_offset = cp->provider->mediasize;
392 	bp->bio_length = 0;
393 	bp->bio_data = NULL;
394 	g_io_request(bp, cp);
395 	error = biowait(bp, "gflush");
396 	g_destroy_bio(bp);
397 	return (error);
398 }
399 
400 static int
401 g_io_check(struct bio *bp)
402 {
403 	struct g_consumer *cp;
404 	struct g_provider *pp;
405 	off_t excess;
406 	int error;
407 
408 	biotrack(bp, __func__);
409 
410 	cp = bp->bio_from;
411 	pp = bp->bio_to;
412 
413 	/* Fail if access counters dont allow the operation */
414 	switch(bp->bio_cmd) {
415 	case BIO_READ:
416 	case BIO_GETATTR:
417 		if (cp->acr == 0)
418 			return (EPERM);
419 		break;
420 	case BIO_WRITE:
421 	case BIO_DELETE:
422 	case BIO_FLUSH:
423 		if (cp->acw == 0)
424 			return (EPERM);
425 		break;
426 	case BIO_ZONE:
427 		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
428 		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
429 			if (cp->acr == 0)
430 				return (EPERM);
431 		} else if (cp->acw == 0)
432 			return (EPERM);
433 		break;
434 	default:
435 		return (EPERM);
436 	}
437 	/* if provider is marked for error, don't disturb. */
438 	if (pp->error)
439 		return (pp->error);
440 	if (cp->flags & G_CF_ORPHAN)
441 		return (ENXIO);
442 
443 	switch(bp->bio_cmd) {
444 	case BIO_READ:
445 	case BIO_WRITE:
446 	case BIO_DELETE:
447 		/* Zero sectorsize or mediasize is probably a lack of media. */
448 		if (pp->sectorsize == 0 || pp->mediasize == 0)
449 			return (ENXIO);
450 		/* Reject I/O not on sector boundary */
451 		if (bp->bio_offset % pp->sectorsize)
452 			return (EINVAL);
453 		/* Reject I/O not integral sector long */
454 		if (bp->bio_length % pp->sectorsize)
455 			return (EINVAL);
456 		/* Reject requests before or past the end of media. */
457 		if (bp->bio_offset < 0)
458 			return (EIO);
459 		if (bp->bio_offset > pp->mediasize)
460 			return (EIO);
461 
462 		/* Truncate requests to the end of providers media. */
463 		excess = bp->bio_offset + bp->bio_length;
464 		if (excess > bp->bio_to->mediasize) {
465 			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
466 			    round_page(bp->bio_ma_offset +
467 			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
468 			    ("excess bio %p too short", bp));
469 			excess -= bp->bio_to->mediasize;
470 			bp->bio_length -= excess;
471 			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
472 				bp->bio_ma_n = round_page(bp->bio_ma_offset +
473 				    bp->bio_length) / PAGE_SIZE;
474 			}
475 			if (excess > 0)
476 				CTR3(KTR_GEOM, "g_down truncated bio "
477 				    "%p provider %s by %d", bp,
478 				    bp->bio_to->name, excess);
479 		}
480 
481 		/* Deliver zero length transfers right here. */
482 		if (bp->bio_length == 0) {
483 			CTR2(KTR_GEOM, "g_down terminated 0-length "
484 			    "bp %p provider %s", bp, bp->bio_to->name);
485 			return (0);
486 		}
487 
488 		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
489 		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
490 		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
491 			if ((error = g_io_transient_map_bio(bp)) >= 0)
492 				return (error);
493 		}
494 		break;
495 	default:
496 		break;
497 	}
498 	return (EJUSTRETURN);
499 }
500 
501 /*
502  * bio classification support.
503  *
504  * g_register_classifier() and g_unregister_classifier()
505  * are used to add/remove a classifier from the list.
506  * The list is protected using the g_bio_run_down lock,
507  * because the classifiers are called in this path.
508  *
509  * g_io_request() passes bio's that are not already classified
510  * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
511  * Classifiers can store their result in the two fields
512  * bio_classifier1 and bio_classifier2.
513  * A classifier that updates one of the fields should
514  * return a non-zero value.
515  * If no classifier updates the field, g_run_classifiers() sets
516  * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
517  */
518 
519 int
520 g_register_classifier(struct g_classifier_hook *hook)
521 {
522 
523 	g_bioq_lock(&g_bio_run_down);
524 	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
525 	g_bioq_unlock(&g_bio_run_down);
526 
527 	return (0);
528 }
529 
530 void
531 g_unregister_classifier(struct g_classifier_hook *hook)
532 {
533 	struct g_classifier_hook *entry;
534 
535 	g_bioq_lock(&g_bio_run_down);
536 	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
537 		if (entry == hook) {
538 			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
539 			break;
540 		}
541 	}
542 	g_bioq_unlock(&g_bio_run_down);
543 }
544 
545 static void
546 g_run_classifiers(struct bio *bp)
547 {
548 	struct g_classifier_hook *hook;
549 	int classified = 0;
550 
551 	biotrack(bp, __func__);
552 
553 	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
554 		classified |= hook->func(hook->arg, bp);
555 
556 	if (!classified)
557 		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
558 }
559 
560 void
561 g_io_request(struct bio *bp, struct g_consumer *cp)
562 {
563 	struct g_provider *pp;
564 	struct mtx *mtxp;
565 	int direct, error, first;
566 	uint8_t cmd;
567 
568 	biotrack(bp, __func__);
569 
570 	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
571 	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
572 	pp = cp->provider;
573 	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
574 #ifdef DIAGNOSTIC
575 	KASSERT(bp->bio_driver1 == NULL,
576 	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
577 	KASSERT(bp->bio_driver2 == NULL,
578 	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
579 	KASSERT(bp->bio_pflags == 0,
580 	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
581 	/*
582 	 * Remember consumer's private fields, so we can detect if they were
583 	 * modified by the provider.
584 	 */
585 	bp->_bio_caller1 = bp->bio_caller1;
586 	bp->_bio_caller2 = bp->bio_caller2;
587 	bp->_bio_cflags = bp->bio_cflags;
588 #endif
589 
590 	cmd = bp->bio_cmd;
591 	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
592 		KASSERT(bp->bio_data != NULL,
593 		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
594 	}
595 	if (cmd == BIO_DELETE || cmd == BIO_FLUSH) {
596 		KASSERT(bp->bio_data == NULL,
597 		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
598 		    bp->bio_cmd));
599 	}
600 	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
601 		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
602 		    ("wrong offset %jd for sectorsize %u",
603 		    bp->bio_offset, cp->provider->sectorsize));
604 		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
605 		    ("wrong length %jd for sectorsize %u",
606 		    bp->bio_length, cp->provider->sectorsize));
607 	}
608 
609 	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
610 	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);
611 
612 	bp->bio_from = cp;
613 	bp->bio_to = pp;
614 	bp->bio_error = 0;
615 	bp->bio_completed = 0;
616 
617 	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
618 	    ("Bio already on queue bp=%p", bp));
619 	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
620 	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
621 		binuptime(&bp->bio_t0);
622 	else
623 		getbinuptime(&bp->bio_t0);
624 
625 #ifdef GET_STACK_USAGE
626 	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
627 	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
628 	    !g_is_geom_thread(curthread) &&
629 	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
630 	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
631 	    pace == 0;
632 	if (direct) {
633 		/* Block direct execution if less then half of stack left. */
634 		size_t	st, su;
635 		GET_STACK_USAGE(st, su);
636 		if (su * 2 > st)
637 			direct = 0;
638 	}
639 #else
640 	direct = 0;
641 #endif
642 
643 	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
644 		g_bioq_lock(&g_bio_run_down);
645 		g_run_classifiers(bp);
646 		g_bioq_unlock(&g_bio_run_down);
647 	}
648 
649 	/*
650 	 * The statistics collection is lockless, as such, but we
651 	 * can not update one instance of the statistics from more
652 	 * than one thread at a time, so grab the lock first.
653 	 */
654 	mtxp = mtx_pool_find(mtxpool_sleep, pp);
655 	mtx_lock(mtxp);
656 	if (g_collectstats & G_STATS_PROVIDERS)
657 		devstat_start_transaction(pp->stat, &bp->bio_t0);
658 	if (g_collectstats & G_STATS_CONSUMERS)
659 		devstat_start_transaction(cp->stat, &bp->bio_t0);
660 	pp->nstart++;
661 	cp->nstart++;
662 	mtx_unlock(mtxp);
663 
664 	if (direct) {
665 		error = g_io_check(bp);
666 		if (error >= 0) {
667 			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
668 			    "provider %s returned %d", bp, bp->bio_to->name,
669 			    error);
670 			g_io_deliver(bp, error);
671 			return;
672 		}
673 		bp->bio_to->geom->start(bp);
674 	} else {
675 		g_bioq_lock(&g_bio_run_down);
676 		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
677 		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
678 		bp->bio_flags |= BIO_ONQUEUE;
679 		g_bio_run_down.bio_queue_length++;
680 		g_bioq_unlock(&g_bio_run_down);
681 		/* Pass it on down. */
682 		if (first)
683 			wakeup(&g_wait_down);
684 	}
685 }
686 
687 void
688 g_io_deliver(struct bio *bp, int error)
689 {
690 	struct bintime now;
691 	struct g_consumer *cp;
692 	struct g_provider *pp;
693 	struct mtx *mtxp;
694 	int direct, first;
695 
696 	biotrack(bp, __func__);
697 
698 	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
699 	pp = bp->bio_to;
700 	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
701 	cp = bp->bio_from;
702 	if (cp == NULL) {
703 		bp->bio_error = error;
704 		bp->bio_done(bp);
705 		return;
706 	}
707 	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
708 	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
709 #ifdef DIAGNOSTIC
710 	/*
711 	 * Some classes - GJournal in particular - can modify bio's
712 	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
713 	 * flag means it's an expected behaviour for that particular geom.
714 	 */
715 	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
716 		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
717 		    ("bio_caller1 used by the provider %s", pp->name));
718 		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
719 		    ("bio_caller2 used by the provider %s", pp->name));
720 		KASSERT(bp->bio_cflags == bp->_bio_cflags,
721 		    ("bio_cflags used by the provider %s", pp->name));
722 	}
723 #endif
724 	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
725 	KASSERT(bp->bio_completed <= bp->bio_length,
726 	    ("bio_completed can't be greater than bio_length"));
727 
728 	g_trace(G_T_BIO,
729 "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
730 	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
731 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
732 
733 	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
734 	    ("Bio already on queue bp=%p", bp));
735 
736 	/*
737 	 * XXX: next two doesn't belong here
738 	 */
739 	bp->bio_bcount = bp->bio_length;
740 	bp->bio_resid = bp->bio_bcount - bp->bio_completed;
741 
742 #ifdef GET_STACK_USAGE
743 	direct = (pp->flags & G_PF_DIRECT_SEND) &&
744 		 (cp->flags & G_CF_DIRECT_RECEIVE) &&
745 		 !g_is_geom_thread(curthread);
746 	if (direct) {
747 		/* Block direct execution if less then half of stack left. */
748 		size_t	st, su;
749 		GET_STACK_USAGE(st, su);
750 		if (su * 2 > st)
751 			direct = 0;
752 	}
753 #else
754 	direct = 0;
755 #endif
756 
757 	/*
758 	 * The statistics collection is lockless, as such, but we
759 	 * can not update one instance of the statistics from more
760 	 * than one thread at a time, so grab the lock first.
761 	 */
762 	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
763 	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
764 		binuptime(&now);
765 	mtxp = mtx_pool_find(mtxpool_sleep, cp);
766 	mtx_lock(mtxp);
767 	if (g_collectstats & G_STATS_PROVIDERS)
768 		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
769 	if (g_collectstats & G_STATS_CONSUMERS)
770 		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
771 	cp->nend++;
772 	pp->nend++;
773 	mtx_unlock(mtxp);
774 
775 	if (error != ENOMEM) {
776 		bp->bio_error = error;
777 		if (direct) {
778 			biodone(bp);
779 		} else {
780 			g_bioq_lock(&g_bio_run_up);
781 			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
782 			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
783 			bp->bio_flags |= BIO_ONQUEUE;
784 			g_bio_run_up.bio_queue_length++;
785 			g_bioq_unlock(&g_bio_run_up);
786 			if (first)
787 				wakeup(&g_wait_up);
788 		}
789 		return;
790 	}
791 
792 	if (bootverbose)
793 		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
794 	bp->bio_children = 0;
795 	bp->bio_inbed = 0;
796 	bp->bio_driver1 = NULL;
797 	bp->bio_driver2 = NULL;
798 	bp->bio_pflags = 0;
799 	g_io_request(bp, cp);
800 	pace = 1;
801 	return;
802 }
803 
804 SYSCTL_DECL(_kern_geom);
805 
806 static long transient_maps;
807 SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
808     &transient_maps, 0,
809     "Total count of the transient mapping requests");
810 u_int transient_map_retries = 10;
811 SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
812     &transient_map_retries, 0,
813     "Max count of retries used before giving up on creating transient map");
814 int transient_map_hard_failures;
815 SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
816     &transient_map_hard_failures, 0,
817     "Failures to establish the transient mapping due to retry attempts "
818     "exhausted");
819 int transient_map_soft_failures;
820 SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
821     &transient_map_soft_failures, 0,
822     "Count of retried failures to establish the transient mapping");
823 int inflight_transient_maps;
824 SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
825     &inflight_transient_maps, 0,
826     "Current count of the active transient maps");
827 
828 static int
829 g_io_transient_map_bio(struct bio *bp)
830 {
831 	vm_offset_t addr;
832 	long size;
833 	u_int retried;
834 
835 	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));
836 
837 	size = round_page(bp->bio_ma_offset + bp->bio_length);
838 	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
839 	addr = 0;
840 	retried = 0;
841 	atomic_add_long(&transient_maps, 1);
842 retry:
843 	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
844 		if (transient_map_retries != 0 &&
845 		    retried >= transient_map_retries) {
846 			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
847 			    bp, bp->bio_to->name);
848 			atomic_add_int(&transient_map_hard_failures, 1);
849 			return (EDEADLK/* XXXKIB */);
850 		} else {
851 			/*
852 			 * Naive attempt to quisce the I/O to get more
853 			 * in-flight requests completed and defragment
854 			 * the transient_arena.
855 			 */
856 			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
857 			    bp, bp->bio_to->name, retried);
858 			pause("g_d_tra", hz / 10);
859 			retried++;
860 			atomic_add_int(&transient_map_soft_failures, 1);
861 			goto retry;
862 		}
863 	}
864 	atomic_add_int(&inflight_transient_maps, 1);
865 	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
866 	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
867 	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
868 	bp->bio_flags &= ~BIO_UNMAPPED;
869 	return (EJUSTRETURN);
870 }
871 
872 void
873 g_io_schedule_down(struct thread *tp __unused)
874 {
875 	struct bio *bp;
876 	int error;
877 
878 	for(;;) {
879 		g_bioq_lock(&g_bio_run_down);
880 		bp = g_bioq_first(&g_bio_run_down);
881 		if (bp == NULL) {
882 			CTR0(KTR_GEOM, "g_down going to sleep");
883 			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
884 			    PRIBIO | PDROP, "-", 0);
885 			continue;
886 		}
887 		CTR0(KTR_GEOM, "g_down has work to do");
888 		g_bioq_unlock(&g_bio_run_down);
889 		biotrack(bp, __func__);
890 		if (pace != 0) {
891 			/*
892 			 * There has been at least one memory allocation
893 			 * failure since the last I/O completed. Pause 1ms to
894 			 * give the system a chance to free up memory. We only
895 			 * do this once because a large number of allocations
896 			 * can fail in the direct dispatch case and there's no
897 			 * relationship between the number of these failures and
898 			 * the length of the outage. If there's still an outage,
899 			 * we'll pause again and again until it's
900 			 * resolved. Older versions paused longer and once per
901 			 * allocation failure. This was OK for a single threaded
902 			 * g_down, but with direct dispatch would lead to max of
903 			 * 10 IOPs for minutes at a time when transient memory
904 			 * issues prevented allocation for a batch of requests
905 			 * from the upper layers.
906 			 *
907 			 * XXX This pacing is really lame. It needs to be solved
908 			 * by other methods. This is OK only because the worst
909 			 * case scenario is so rare. In the worst case scenario
910 			 * all memory is tied up waiting for I/O to complete
911 			 * which can never happen since we can't allocate bios
912 			 * for that I/O.
913 			 */
914 			CTR0(KTR_GEOM, "g_down pacing self");
915 			pause("g_down", min(hz/1000, 1));
916 			pace = 0;
917 		}
918 		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
919 		    bp->bio_to->name);
920 		error = g_io_check(bp);
921 		if (error >= 0) {
922 			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
923 			    "%s returned %d", bp, bp->bio_to->name, error);
924 			g_io_deliver(bp, error);
925 			continue;
926 		}
927 		THREAD_NO_SLEEPING();
928 		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
929 		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
930 		    bp->bio_length);
931 		bp->bio_to->geom->start(bp);
932 		THREAD_SLEEPING_OK();
933 	}
934 }
935 
936 void
937 g_io_schedule_up(struct thread *tp __unused)
938 {
939 	struct bio *bp;
940 
941 	for(;;) {
942 		g_bioq_lock(&g_bio_run_up);
943 		bp = g_bioq_first(&g_bio_run_up);
944 		if (bp == NULL) {
945 			CTR0(KTR_GEOM, "g_up going to sleep");
946 			msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
947 			    PRIBIO | PDROP, "-", 0);
948 			continue;
949 		}
950 		g_bioq_unlock(&g_bio_run_up);
951 		THREAD_NO_SLEEPING();
952 		CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
953 		    "%jd len %ld", bp, bp->bio_to->name,
954 		    bp->bio_offset, bp->bio_length);
955 		biodone(bp);
956 		THREAD_SLEEPING_OK();
957 	}
958 }
959 
960 void *
961 g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
962 {
963 	struct bio *bp;
964 	void *ptr;
965 	int errorc;
966 
967 	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
968 	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
969 	    (intmax_t)length));
970 
971 	bp = g_alloc_bio();
972 	bp->bio_cmd = BIO_READ;
973 	bp->bio_done = NULL;
974 	bp->bio_offset = offset;
975 	bp->bio_length = length;
976 	ptr = g_malloc(length, M_WAITOK);
977 	bp->bio_data = ptr;
978 	g_io_request(bp, cp);
979 	errorc = biowait(bp, "gread");
980 	if (error != NULL)
981 		*error = errorc;
982 	g_destroy_bio(bp);
983 	if (errorc) {
984 		g_free(ptr);
985 		ptr = NULL;
986 	}
987 	return (ptr);
988 }
989 
990 /*
991  * A read function for use by ffs_sbget when used by GEOM-layer routines.
992  */
993 int
994 g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size)
995 {
996 	struct g_consumer *cp;
997 
998 	KASSERT(*bufp == NULL,
999 	    ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp));
1000 
1001 	cp = (struct g_consumer *)devfd;
1002 	/*
1003 	 * Take care not to issue an invalid I/O request. The offset of
1004 	 * the superblock candidate must be multiples of the provider's
1005 	 * sector size, otherwise an FFS can't exist on the provider
1006 	 * anyway.
1007 	 */
1008 	if (loc % cp->provider->sectorsize != 0)
1009 		return (ENOENT);
1010 	*bufp = g_read_data(cp, loc, size, NULL);
1011 	if (*bufp == NULL)
1012 		return (ENOENT);
1013 	return (0);
1014 }
1015 
1016 int
1017 g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
1018 {
1019 	struct bio *bp;
1020 	int error;
1021 
1022 	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
1023 	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
1024 	    (intmax_t)length));
1025 
1026 	bp = g_alloc_bio();
1027 	bp->bio_cmd = BIO_WRITE;
1028 	bp->bio_done = NULL;
1029 	bp->bio_offset = offset;
1030 	bp->bio_length = length;
1031 	bp->bio_data = ptr;
1032 	g_io_request(bp, cp);
1033 	error = biowait(bp, "gwrite");
1034 	g_destroy_bio(bp);
1035 	return (error);
1036 }
1037 
1038 /*
1039  * A write function for use by ffs_sbput when used by GEOM-layer routines.
1040  */
1041 int
1042 g_use_g_write_data(void *devfd, off_t loc, void *buf, int size)
1043 {
1044 
1045 	return (g_write_data((struct g_consumer *)devfd, loc, buf, size));
1046 }
1047 
1048 int
1049 g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
1050 {
1051 	struct bio *bp;
1052 	int error;
1053 
1054 	KASSERT(length > 0 && length >= cp->provider->sectorsize,
1055 	    ("g_delete_data(): invalid length %jd", (intmax_t)length));
1056 
1057 	bp = g_alloc_bio();
1058 	bp->bio_cmd = BIO_DELETE;
1059 	bp->bio_done = NULL;
1060 	bp->bio_offset = offset;
1061 	bp->bio_length = length;
1062 	bp->bio_data = NULL;
1063 	g_io_request(bp, cp);
1064 	error = biowait(bp, "gdelete");
1065 	g_destroy_bio(bp);
1066 	return (error);
1067 }
1068 
1069 void
1070 g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix,
1071     ...)
1072 {
1073 #ifndef PRINTF_BUFR_SIZE
1074 #define PRINTF_BUFR_SIZE 64
1075 #endif
1076 	char bufr[PRINTF_BUFR_SIZE];
1077 	struct sbuf sb, *sbp __unused;
1078 	va_list ap;
1079 
1080 	sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN);
1081 	KASSERT(sbp != NULL, ("sbuf_new misused?"));
1082 
1083 	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
1084 
1085 	sbuf_cat(&sb, prefix);
1086 	g_format_bio(&sb, bp);
1087 
1088 	va_start(ap, fmtsuffix);
1089 	sbuf_vprintf(&sb, fmtsuffix, ap);
1090 	va_end(ap);
1091 
1092 	sbuf_nl_terminate(&sb);
1093 
1094 	sbuf_finish(&sb);
1095 	sbuf_delete(&sb);
1096 }
1097 
1098 void
1099 g_format_bio(struct sbuf *sb, const struct bio *bp)
1100 {
1101 	const char *pname, *cmd = NULL;
1102 
1103 	if (bp->bio_to != NULL)
1104 		pname = bp->bio_to->name;
1105 	else
1106 		pname = "[unknown]";
1107 
1108 	switch (bp->bio_cmd) {
1109 	case BIO_GETATTR:
1110 		cmd = "GETATTR";
1111 		sbuf_printf(sb, "%s[%s(attr=%s)]", pname, cmd,
1112 		    bp->bio_attribute);
1113 		return;
1114 	case BIO_FLUSH:
1115 		cmd = "FLUSH";
1116 		sbuf_printf(sb, "%s[%s]", pname, cmd);
1117 		return;
1118 	case BIO_ZONE: {
1119 		char *subcmd = NULL;
1120 		cmd = "ZONE";
1121 		switch (bp->bio_zone.zone_cmd) {
1122 		case DISK_ZONE_OPEN:
1123 			subcmd = "OPEN";
1124 			break;
1125 		case DISK_ZONE_CLOSE:
1126 			subcmd = "CLOSE";
1127 			break;
1128 		case DISK_ZONE_FINISH:
1129 			subcmd = "FINISH";
1130 			break;
1131 		case DISK_ZONE_RWP:
1132 			subcmd = "RWP";
1133 			break;
1134 		case DISK_ZONE_REPORT_ZONES:
1135 			subcmd = "REPORT ZONES";
1136 			break;
1137 		case DISK_ZONE_GET_PARAMS:
1138 			subcmd = "GET PARAMS";
1139 			break;
1140 		default:
1141 			subcmd = "UNKNOWN";
1142 			break;
1143 		}
1144 		sbuf_printf(sb, "%s[%s,%s]", pname, cmd, subcmd);
1145 		return;
1146 	}
1147 	case BIO_READ:
1148 		cmd = "READ";
1149 		break;
1150 	case BIO_WRITE:
1151 		cmd = "WRITE";
1152 		break;
1153 	case BIO_DELETE:
1154 		cmd = "DELETE";
1155 		break;
1156 	default:
1157 		cmd = "UNKNOWN";
1158 		sbuf_printf(sb, "%s[%s()]", pname, cmd);
1159 		return;
1160 	}
1161 	sbuf_printf(sb, "%s[%s(offset=%jd, length=%jd)]", pname, cmd,
1162 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
1163 }
1164