/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <machine/atomic.h>
#include <machine/stdarg.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static int	g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve. When pacing, we also turn
 * off direct dispatch to further reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressures exist. See g_io_schedule_down() for more details
 * and limitations.
 */
static volatile u_int __read_mostly pace;

static uma_zone_t __read_mostly biozone;

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

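/*
 * Allocate a new bio without sleeping (M_NOWAIT); this may fail and
 * return NULL, so callers must be prepared for that, e.g. (a sketch,
 * with pbp standing in for a caller-supplied parent bio):
 *
 *	bp = g_new_bio();
 *	if (bp == NULL) {
 *		g_io_deliver(pbp, ENOMEM);
 *		return;
 *	}
 *
 * Use g_alloc_bio() instead when sleeping is permissible.
 */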
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	uma_zfree(biozone, bp);
}

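/*
 * Clone a bio for passing a request on down to our provider.  A typical
 * GEOM class does something like the following in its start routine
 * (a sketch; error handling and the class-specific sc_offset field are
 * illustrative only):
 *
 *	bp2 = g_clone_bio(bp);
 *	if (bp2 == NULL) {
 *		g_io_deliver(bp, ENOMEM);
 *		return;
 *	}
 *	bp2->bio_offset += sc->sc_offset;
 *	bp2->bio_done = g_std_done;
 *	g_io_request(bp2, cp);
 */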
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 *  BIO_ORDERED flag may be used by disk drivers to enforce
		 *  ordering restrictions, so this flag needs to be cloned.
		 *  BIO_UNMAPPED, BIO_VLIST, and BIO_SWAP should be inherited,
		 *  to properly indicate which way the buffer is passed.
		 *  Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST | BIO_SWAP);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		if (bp->bio_cmd == BIO_ZONE)
			bcopy(&bp->bio_zone, &bp2->bio_zone,
			    sizeof(bp->bio_zone));
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		bp2->bio_track_bp = bp->bio_track_bp;
#endif
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}

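/*
 * Like g_clone_bio(), but sleeps (M_WAITOK) until memory is available
 * and therefore cannot fail.  Only usable from contexts that may sleep.
 */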
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST | BIO_SWAP);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}

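/*
 * Reset a caller-owned bio back to its freshly-allocated state so it
 * can be reused for another request.
 */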
void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	biozone = uma_zcreate("g_bio", sizeof(struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}

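/*
 * Issue a synchronous BIO_GETATTR request and wait for the answer.  On
 * input *len is the size of the buffer at ptr; on return it holds the
 * number of bytes actually delivered.  For example (a sketch), querying
 * whether a provider supports BIO_DELETE:
 *
 *	int candelete, len = sizeof(candelete);
 *
 *	if (g_io_getattr("GEOM::candelete", cp, &len, &candelete) == 0 &&
 *	    candelete != 0)
 *		(the provider supports BIO_DELETE)
 */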
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

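/*
 * Issue a synchronous BIO_ZONE request carrying the given zone command
 * arguments and copy the results back to the caller on completion.
 */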
int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_ZONE;
	bp->bio_done = NULL;
	/*
	 * XXX KDM need to handle report zone data.
	 */
	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
		bp->bio_length =
		    zone_args->zone_params.report.entries_allocated *
		    sizeof(struct disk_zone_rep_entry);
	else
		bp->bio_length = 0;

	g_io_request(bp, cp);
	error = biowait(bp, "gzone");
	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
	g_destroy_bio(bp);
	return (error);
}

/*
 * Send a BIO_SPEEDUP down the stack.  This is used to tell the lower
 * layers that the upper layers have detected a resource shortage.  The
 * lower layers are advised to stop delaying I/O that they might be
 * holding for performance reasons and to schedule it (non-trims) or
 * complete it successfully (trims) as quickly as they can.  bio_length
 * is the amount of the shortage.  This call should be non-blocking.
 * bio_resid is used to communicate back if the lower layers couldn't
 * find bio_length worth of I/O to schedule or discard.  A length of 0
 * means to do as much as you can (schedule the h/w queues full, discard
 * all trims).  flags are a hint from the upper layers to the lower
 * layers as to what operation should be done.
 */
int
g_io_speedup(off_t shortage, u_int flags, size_t *resid, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	KASSERT((flags & (BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE)) != 0,
	    ("Invalid flags passed to g_io_speedup: %#x", flags));
	g_trace(G_T_BIO, "bio_speedup(%s, %jd, %#x)", cp->provider->name,
	    (intmax_t)shortage, flags);
	bp = g_new_bio();
	if (bp == NULL)
		return (ENOMEM);
	bp->bio_cmd = BIO_SPEEDUP;
	bp->bio_length = shortage;
	bp->bio_done = NULL;
	bp->bio_flags |= flags;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	*resid = bp->bio_resid;
	g_destroy_bio(bp);
	return (error);
}

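/*
 * Issue an ordered BIO_FLUSH and wait for it to complete, asking the
 * provider to commit any volatile write cache to stable storage.
 */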
int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

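/*
 * Validate a bio against the consumer's access counts and the
 * provider's media parameters.  Returns a positive errno to fail the
 * request, 0 to complete it immediately (zero-length transfers), or
 * EJUSTRETURN to let it continue down the normal dispatch path.
 */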
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	biotrack(bp, __func__);

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_SPEEDUP:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	case BIO_ZONE:
		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
			if (cp->acr == 0)
				return (EPERM);
		} else if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}

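/*
 * Hand a bio to the provider attached to the given consumer, either by
 * direct dispatch into the provider's start routine or by queueing it
 * for the g_down thread.  Completion is reported asynchronously through
 * g_io_deliver().  A typical synchronous caller (a sketch; the wmesg is
 * arbitrary) leaves bio_done NULL and waits:
 *
 *	g_io_request(bp, cp);
 *	error = biowait(bp, "gfoo");
 */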
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int direct, error, first;
	uint8_t cmd;

	biotrack(bp, __func__);

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember the consumer's private fields, so we can detect if they
	 * were modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	cmd = bp->bio_cmd;
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
	}
	if (cmd == BIO_DELETE || cmd == BIO_FLUSH || cmd == BIO_SPEEDUP) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
		    bp->bio_cmd));
	}
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction_bio_t0(cp->stat, bp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction_bio_t0(pp->stat, bp);
#ifdef INVARIANTS
	atomic_add_int(&cp->nstart, 1);
#endif

	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    curthread != g_down_td &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half the stack is left. */
		size_t	st, su;

		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}

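/*
 * Report completion of a bio back towards its originator, updating the
 * I/O statistics and either calling biodone() directly or queueing the
 * bio for the g_up thread.  ENOMEM completions are not delivered; the
 * request is resubmitted instead and the down path is paced.
 */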
void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	biotrack(bp, __func__);

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; the
	 * G_GEOM_VOLATILE_BIO flag means this is expected behaviour for
	 * that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	direct = (pp->flags & G_PF_DIRECT_SEND) &&
		 (cp->flags & G_CF_DIRECT_RECEIVE) &&
		 curthread != g_up_td;
	if (direct) {
		/* Block direct execution if less than half the stack is left. */
		size_t	st, su;

		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
#ifdef INVARIANTS
	cp->nend++;
#endif
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");

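/*
 * Map the pages of an unmapped bio into kernel virtual address space
 * borrowed from the transient arena, so that a provider which cannot
 * handle unmapped I/O (no G_PF_ACCEPT_UNMAPPED) still sees a valid
 * bio_data pointer.  The mapping is torn down again in biodone().
 * Returns EJUSTRETURN on success so that the caller continues down the
 * normal dispatch path.
 */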
static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK /* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}

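/*
 * Main loop of the g_down kthread: dequeue bios from g_bio_run_down,
 * validate them with g_io_check() and hand them to the provider's
 * start routine.
 */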
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		biotrack(bp, __func__);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed. Pause 1ms to
			 * give the system a chance to free up memory. We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures and
			 * the length of the outage. If there's still an outage,
			 * we'll pause again and again until it's
			 * resolved. Older versions paused longer and once per
			 * allocation failure. This was OK for a single-threaded
			 * g_down, but with direct dispatch would lead to a
			 * maximum of 10 IOPs for minutes at a time when
			 * transient memory issues prevented allocation for a
			 * batch of requests from the upper layers.
			 *
			 * XXX This pacing is really lame. It needs to be solved
			 * by other methods. This is OK only because the worst
			 * case scenario is so rare. In the worst case scenario
			 * all memory is tied up waiting for I/O to complete
			 * which can never happen since we can't allocate bios
			 * for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

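/*
 * Main loop of the g_up kthread: dequeue completed bios from
 * g_bio_run_up and finish them with biodone().
 */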
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_up going to sleep");
			msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		g_bioq_unlock(&g_bio_run_up);
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
		    "%jd len %ld", bp, bp->bio_to->name,
		    bp->bio_offset, bp->bio_length);
		biodone(bp);
		THREAD_SLEEPING_OK();
	}
}

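/*
 * Synchronous read helper: allocate a buffer, read `length' bytes at
 * `offset' into it and return the buffer, which the caller must release
 * with g_free().  On failure NULL is returned and *error (if non-NULL)
 * is set.  Typical use while tasting a provider (a sketch):
 *
 *	buf = g_read_data(cp, 0, cp->provider->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	(examine buf, then g_free(buf))
 */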
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= maxphys, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (errorc == 0 && bp->bio_completed != length)
		errorc = EIO;
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

/*
 * A read function for use by ffs_sbget when used by GEOM-layer routines.
 */
int
g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size)
{
	struct g_consumer *cp;

	KASSERT(*bufp == NULL,
	    ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp));

	cp = (struct g_consumer *)devfd;
	/*
	 * Take care not to issue an invalid I/O request.  The offset of
	 * the superblock candidate must be a multiple of the provider's
	 * sector size, otherwise an FFS can't exist on the provider
	 * anyway.
	 */
	if (loc % cp->provider->sectorsize != 0)
		return (ENOENT);
	*bufp = g_read_data(cp, loc, size, NULL);
	if (*bufp == NULL)
		return (ENOENT);
	return (0);
}

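/*
 * Synchronous write helper: write `length' bytes from `ptr' at
 * `offset' and return 0 or an errno.  As with g_read_data(), length
 * must be at least one sector and no larger than maxphys.
 */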
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= maxphys, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	if (error == 0 && bp->bio_completed != length)
		error = EIO;
	g_destroy_bio(bp);
	return (error);
}

/*
 * A write function for use by ffs_sbput when used by GEOM-layer routines.
 */
int
g_use_g_write_data(void *devfd, off_t loc, void *buf, int size)
{

	return (g_write_data((struct g_consumer *)devfd, loc, buf, size));
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	if (error == 0 && bp->bio_completed != length)
		error = EIO;
	g_destroy_bio(bp);
	return (error);
}

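/*
 * Print a one-line description of a bio, prefixed by `prefix' and
 * followed by a printf-style formatted suffix, for example (a sketch):
 *
 *	g_print_bio("GEOM_FOO", bp, " failed, error=%d", error);
 */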
void
g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix,
    ...)
{
#ifndef PRINTF_BUFR_SIZE
#define PRINTF_BUFR_SIZE 64
#endif
	char bufr[PRINTF_BUFR_SIZE];
	struct sbuf sb, *sbp __unused;
	va_list ap;

	sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN);
	KASSERT(sbp != NULL, ("sbuf_new misused?"));

	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);

	sbuf_cat(&sb, prefix);
	g_format_bio(&sb, bp);

	va_start(ap, fmtsuffix);
	sbuf_vprintf(&sb, fmtsuffix, ap);
	va_end(ap);

	sbuf_nl_terminate(&sb);

	sbuf_finish(&sb);
	sbuf_delete(&sb);
}

void
g_format_bio(struct sbuf *sb, const struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		sbuf_printf(sb, "%s[%s(attr=%s)]", pname, cmd,
		    bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		sbuf_printf(sb, "%s[%s]", pname, cmd);
		return;
	case BIO_ZONE: {
		const char *subcmd = NULL;

		cmd = "ZONE";
		switch (bp->bio_zone.zone_cmd) {
		case DISK_ZONE_OPEN:
			subcmd = "OPEN";
			break;
		case DISK_ZONE_CLOSE:
			subcmd = "CLOSE";
			break;
		case DISK_ZONE_FINISH:
			subcmd = "FINISH";
			break;
		case DISK_ZONE_RWP:
			subcmd = "RWP";
			break;
		case DISK_ZONE_REPORT_ZONES:
			subcmd = "REPORT ZONES";
			break;
		case DISK_ZONE_GET_PARAMS:
			subcmd = "GET PARAMS";
			break;
		default:
			subcmd = "UNKNOWN";
			break;
		}
		sbuf_printf(sb, "%s[%s,%s]", pname, cmd, subcmd);
		return;
	}
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		sbuf_printf(sb, "%s[%s()]", pname, cmd);
		return;
	}
	sbuf_printf(sb, "%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}