xref: /freebsd/sys/geom/geom_io.c (revision 41059135ce931c0f1014a999ffabc6bc470ce856)
1 /*-
2  * Copyright (c) 2002 Poul-Henning Kamp
3  * Copyright (c) 2002 Networks Associates Technology, Inc.
4  * Copyright (c) 2013 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * This software was developed for the FreeBSD Project by Poul-Henning Kamp
8  * and NAI Labs, the Security Research Division of Network Associates, Inc.
9  * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
10  * DARPA CHATS research program.
11  *
12  * Portions of this software were developed by Konstantin Belousov
13  * under sponsorship from the FreeBSD Foundation.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. The names of the authors may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/bio.h>
48 #include <sys/ktr.h>
49 #include <sys/proc.h>
50 #include <sys/stack.h>
51 #include <sys/sysctl.h>
52 #include <sys/vmem.h>
53 
54 #include <sys/errno.h>
55 #include <geom/geom.h>
56 #include <geom/geom_int.h>
57 #include <sys/devicestat.h>
58 
59 #include <vm/uma.h>
60 #include <vm/vm.h>
61 #include <vm/vm_param.h>
62 #include <vm/vm_kern.h>
63 #include <vm/vm_page.h>
64 #include <vm/vm_object.h>
65 #include <vm/vm_extern.h>
66 #include <vm/vm_map.h>
67 
68 static int	g_io_transient_map_bio(struct bio *bp);
69 
70 static struct g_bioq g_bio_run_down;
71 static struct g_bioq g_bio_run_up;
72 
73 /*
74  * Pace is a hint that we've had some trouble recently allocating
75  * bios, so we should back off trying to send I/O down the stack
76  * a bit to let the problem resolve. When pacing, we also turn
77  * off direct dispatch to reduce memory pressure from I/Os
78  * there, at the expense of some added latency while the memory
79  * pressure exists. See g_io_schedule_down() for more details
80  * and limitations.
81  */
82 static volatile u_int pace;
83 
84 static uma_zone_t	biozone;
85 
86 /*
87  * The head of the list of classifiers used in g_io_request.
88  * Use g_register_classifier() and g_unregister_classifier()
89  * to add or remove entries from the list.
90  * Classifiers are invoked in registration order.
91  */
92 static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
93     g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);
94 
95 #include <machine/atomic.h>
96 
97 static void
98 g_bioq_lock(struct g_bioq *bq)
99 {
100 
101 	mtx_lock(&bq->bio_queue_lock);
102 }
103 
104 static void
105 g_bioq_unlock(struct g_bioq *bq)
106 {
107 
108 	mtx_unlock(&bq->bio_queue_lock);
109 }
110 
111 #if 0
112 static void
113 g_bioq_destroy(struct g_bioq *bq)
114 {
115 
116 	mtx_destroy(&bq->bio_queue_lock);
117 }
118 #endif
119 
120 static void
121 g_bioq_init(struct g_bioq *bq)
122 {
123 
124 	TAILQ_INIT(&bq->bio_queue);
125 	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
126 }
127 
128 static struct bio *
129 g_bioq_first(struct g_bioq *bq)
130 {
131 	struct bio *bp;
132 
133 	bp = TAILQ_FIRST(&bq->bio_queue);
134 	if (bp != NULL) {
135 		KASSERT((bp->bio_flags & BIO_ONQUEUE),
136 		    ("Bio not on queue bp=%p target %p", bp, bq));
137 		bp->bio_flags &= ~BIO_ONQUEUE;
138 		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
139 		bq->bio_queue_length--;
140 	}
141 	return (bp);
142 }
143 
144 struct bio *
145 g_new_bio(void)
146 {
147 	struct bio *bp;
148 
149 	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
150 #ifdef KTR
151 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
152 		struct stack st;
153 
154 		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
155 		stack_save(&st);
156 		CTRSTACK(KTR_GEOM, &st, 3, 0);
157 	}
158 #endif
159 	return (bp);
160 }
161 
162 struct bio *
163 g_alloc_bio(void)
164 {
165 	struct bio *bp;
166 
167 	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
168 #ifdef KTR
169 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
170 		struct stack st;
171 
172 		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
173 		stack_save(&st);
174 		CTRSTACK(KTR_GEOM, &st, 3, 0);
175 	}
176 #endif
177 	return (bp);
178 }
179 
180 void
181 g_destroy_bio(struct bio *bp)
182 {
183 #ifdef KTR
184 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
185 		struct stack st;
186 
187 		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
188 		stack_save(&st);
189 		CTRSTACK(KTR_GEOM, &st, 3, 0);
190 	}
191 #endif
192 	uma_zfree(biozone, bp);
193 }
194 
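/*
 * Illustrative sketch (not part of this file; kept out of the build
 * with "#if 0", like the g_bioq_destroy() block above): g_new_bio()
 * allocates with M_NOWAIT and may return NULL, so it suits contexts
 * that must not sleep; g_alloc_bio() uses M_WAITOK, never returns
 * NULL, but may sleep.  The function name below is hypothetical.
 */
#if 0
static struct bio *
example_get_bio(int can_sleep)
{
	struct bio *bp;

	if (can_sleep)
		return (g_alloc_bio());	/* never NULL, may sleep */
	bp = g_new_bio();		/* NULL under memory pressure */
	if (bp == NULL) {
		/* Caller must cope, e.g. defer and retry later. */
	}
	return (bp);
}
#endif
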
195 struct bio *
196 g_clone_bio(struct bio *bp)
197 {
198 	struct bio *bp2;
199 
200 	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
201 	if (bp2 != NULL) {
202 		bp2->bio_parent = bp;
203 		bp2->bio_cmd = bp->bio_cmd;
204 		/*
205 		 *  BIO_ORDERED flag may be used by disk drivers to enforce
206 		 *  ordering restrictions, so this flag needs to be cloned.
207 		 *  BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
208 		 *  indicate which way the buffer is passed.
209 		 *  Other bio flags are not suitable for cloning.
210 		 */
211 		bp2->bio_flags = bp->bio_flags &
212 		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
213 		bp2->bio_length = bp->bio_length;
214 		bp2->bio_offset = bp->bio_offset;
215 		bp2->bio_data = bp->bio_data;
216 		bp2->bio_ma = bp->bio_ma;
217 		bp2->bio_ma_n = bp->bio_ma_n;
218 		bp2->bio_ma_offset = bp->bio_ma_offset;
219 		bp2->bio_attribute = bp->bio_attribute;
220 		if (bp->bio_cmd == BIO_ZONE)
221 			bcopy(&bp->bio_zone, &bp2->bio_zone,
222 			    sizeof(bp->bio_zone));
223 		/* Inherit classification info from the parent */
224 		bp2->bio_classifier1 = bp->bio_classifier1;
225 		bp2->bio_classifier2 = bp->bio_classifier2;
226 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
227 		bp2->bio_track_bp = bp->bio_track_bp;
228 #endif
229 		bp->bio_children++;
230 	}
231 #ifdef KTR
232 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
233 		struct stack st;
234 
235 		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
236 		stack_save(&st);
237 		CTRSTACK(KTR_GEOM, &st, 3, 0);
238 	}
239 #endif
240 	return (bp2);
241 }
242 
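/*
 * Illustrative sketch (not compiled): the typical consumer of
 * g_clone_bio() is a transform geom's start routine, which clones
 * the incoming bio, points the clone's bio_done at a completion
 * handler (g_std_done() is the stock one) and passes the clone to
 * the consumer below.  "example_start" and "struct example_softc"
 * are hypothetical names.
 */
#if 0
static void
example_start(struct bio *bp)
{
	struct example_softc *sc = bp->bio_to->geom->softc;
	struct bio *cbp;

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		/* ENOMEM makes g_io_deliver() reissue the request. */
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;	/* completes bp once cbp is done */
	g_io_request(cbp, sc->sc_consumer);
}
#endif
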
243 struct bio *
244 g_duplicate_bio(struct bio *bp)
245 {
246 	struct bio *bp2;
247 
248 	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
249 	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
250 	bp2->bio_parent = bp;
251 	bp2->bio_cmd = bp->bio_cmd;
252 	bp2->bio_length = bp->bio_length;
253 	bp2->bio_offset = bp->bio_offset;
254 	bp2->bio_data = bp->bio_data;
255 	bp2->bio_ma = bp->bio_ma;
256 	bp2->bio_ma_n = bp->bio_ma_n;
257 	bp2->bio_ma_offset = bp->bio_ma_offset;
258 	bp2->bio_attribute = bp->bio_attribute;
259 	bp->bio_children++;
260 #ifdef KTR
261 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
262 		struct stack st;
263 
264 		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
265 		stack_save(&st);
266 		CTRSTACK(KTR_GEOM, &st, 3, 0);
267 	}
268 #endif
269 	return (bp2);
270 }
271 
272 void
273 g_reset_bio(struct bio *bp)
274 {
275 
276 	bzero(bp, sizeof(*bp));
277 }
278 
279 void
280 g_io_init(void)
281 {
282 
283 	g_bioq_init(&g_bio_run_down);
284 	g_bioq_init(&g_bio_run_up);
285 	biozone = uma_zcreate("g_bio", sizeof (struct bio),
286 	    NULL, NULL,
287 	    NULL, NULL,
288 	    0, 0);
289 }
290 
291 int
292 g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
293 {
294 	struct bio *bp;
295 	int error;
296 
297 	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
298 	bp = g_alloc_bio();
299 	bp->bio_cmd = BIO_GETATTR;
300 	bp->bio_done = NULL;
301 	bp->bio_attribute = attr;
302 	bp->bio_length = *len;
303 	bp->bio_data = ptr;
304 	g_io_request(bp, cp);
305 	error = biowait(bp, "ggetattr");
306 	*len = bp->bio_completed;
307 	g_destroy_bio(bp);
308 	return (error);
309 }
310 
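/*
 * Illustrative sketch (not compiled): a typical g_io_getattr() call
 * asks the provider for a named attribute; "GEOM::candelete" is one
 * of the stock attribute names.  The length is passed in by
 * reference and updated to the completed size.
 */
#if 0
	int candelete, error, len;

	len = sizeof(candelete);
	error = g_io_getattr("GEOM::candelete", cp, &len, &candelete);
	if (error == 0 && candelete != 0) {
		/* The provider supports BIO_DELETE. */
	}
#endif
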
311 int
312 g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
313 {
314 	struct bio *bp;
315 	int error;
316 
317 	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
318 	bp = g_alloc_bio();
319 	bp->bio_cmd = BIO_ZONE;
320 	bp->bio_done = NULL;
321 	/*
322 	 * XXX KDM need to handle report zone data.
323 	 */
324 	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
325 	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
326 		bp->bio_length =
327 		    zone_args->zone_params.report.entries_allocated *
328 		    sizeof(struct disk_zone_rep_entry);
329 	else
330 		bp->bio_length = 0;
331 
332 	g_io_request(bp, cp);
333 	error = biowait(bp, "gzone");
334 	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
335 	g_destroy_bio(bp);
336 	return (error);
337 }
338 
339 int
340 g_io_flush(struct g_consumer *cp)
341 {
342 	struct bio *bp;
343 	int error;
344 
345 	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
346 	bp = g_alloc_bio();
347 	bp->bio_cmd = BIO_FLUSH;
348 	bp->bio_flags |= BIO_ORDERED;
349 	bp->bio_done = NULL;
350 	bp->bio_attribute = NULL;
351 	bp->bio_offset = cp->provider->mediasize;
352 	bp->bio_length = 0;
353 	bp->bio_data = NULL;
354 	g_io_request(bp, cp);
355 	error = biowait(bp, "gflush");
356 	g_destroy_bio(bp);
357 	return (error);
358 }
359 
360 static int
361 g_io_check(struct bio *bp)
362 {
363 	struct g_consumer *cp;
364 	struct g_provider *pp;
365 	off_t excess;
366 	int error;
367 
368 	biotrack(bp, __func__);
369 
370 	cp = bp->bio_from;
371 	pp = bp->bio_to;
372 
373 	/* Fail if access counters don't allow the operation */
374 	switch(bp->bio_cmd) {
375 	case BIO_READ:
376 	case BIO_GETATTR:
377 		if (cp->acr == 0)
378 			return (EPERM);
379 		break;
380 	case BIO_WRITE:
381 	case BIO_DELETE:
382 	case BIO_FLUSH:
383 		if (cp->acw == 0)
384 			return (EPERM);
385 		break;
386 	case BIO_ZONE:
387 		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
388 		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
389 			if (cp->acr == 0)
390 				return (EPERM);
391 		} else if (cp->acw == 0)
392 			return (EPERM);
393 		break;
394 	default:
395 		return (EPERM);
396 	}
397 	/* If the provider is marked for error, don't disturb. */
398 	if (pp->error)
399 		return (pp->error);
400 	if (cp->flags & G_CF_ORPHAN)
401 		return (ENXIO);
402 
403 	switch(bp->bio_cmd) {
404 	case BIO_READ:
405 	case BIO_WRITE:
406 	case BIO_DELETE:
407 		/* Zero sectorsize or mediasize is probably a lack of media. */
408 		if (pp->sectorsize == 0 || pp->mediasize == 0)
409 			return (ENXIO);
410 		/* Reject I/O not on a sector boundary */
411 		if (bp->bio_offset % pp->sectorsize)
412 			return (EINVAL);
413 		/* Reject I/O that is not an integral number of sectors long */
414 		if (bp->bio_length % pp->sectorsize)
415 			return (EINVAL);
416 		/* Reject requests before or past the end of media. */
417 		if (bp->bio_offset < 0)
418 			return (EIO);
419 		if (bp->bio_offset > pp->mediasize)
420 			return (EIO);
421 
422 		/* Truncate requests to the end of the provider's media. */
423 		excess = bp->bio_offset + bp->bio_length;
424 		if (excess > bp->bio_to->mediasize) {
425 			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
426 			    round_page(bp->bio_ma_offset +
427 			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
428 			    ("excess bio %p too short", bp));
429 			excess -= bp->bio_to->mediasize;
430 			bp->bio_length -= excess;
431 			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
432 				bp->bio_ma_n = round_page(bp->bio_ma_offset +
433 				    bp->bio_length) / PAGE_SIZE;
434 			}
435 			if (excess > 0)
436 				CTR3(KTR_GEOM, "g_down truncated bio "
437 				    "%p provider %s by %d", bp,
438 				    bp->bio_to->name, excess);
439 		}
440 
441 		/* Deliver zero-length transfers right here. */
442 		if (bp->bio_length == 0) {
443 			CTR2(KTR_GEOM, "g_down terminated 0-length "
444 			    "bp %p provider %s", bp, bp->bio_to->name);
445 			return (0);
446 		}
447 
448 		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
449 		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
450 		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
451 			if ((error = g_io_transient_map_bio(bp)) >= 0)
452 				return (error);
453 		}
454 		break;
455 	default:
456 		break;
457 	}
458 	return (EJUSTRETURN);
459 }
460 
461 /*
462  * bio classification support.
463  *
464  * g_register_classifier() and g_unregister_classifier()
465  * are used to add/remove a classifier from the list.
466  * The list is protected using the g_bio_run_down lock,
467  * because the classifiers are called in this path.
468  *
469  * g_io_request() passes bios that are not already classified
470  * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
471  * Classifiers can store their result in the two fields
472  * bio_classifier1 and bio_classifier2.
473  * A classifier that updates one of the fields should
474  * return a non-zero value.
475  * If no classifier updates the field, g_run_classifiers() sets
476  * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
477  */
478 
479 int
480 g_register_classifier(struct g_classifier_hook *hook)
481 {
482 
483 	g_bioq_lock(&g_bio_run_down);
484 	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
485 	g_bioq_unlock(&g_bio_run_down);
486 
487 	return (0);
488 }
489 
490 void
491 g_unregister_classifier(struct g_classifier_hook *hook)
492 {
493 	struct g_classifier_hook *entry;
494 
495 	g_bioq_lock(&g_bio_run_down);
496 	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
497 		if (entry == hook) {
498 			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
499 			break;
500 		}
501 	}
502 	g_bioq_unlock(&g_bio_run_down);
503 }
504 
505 static void
506 g_run_classifiers(struct bio *bp)
507 {
508 	struct g_classifier_hook *hook;
509 	int classified = 0;
510 
511 	biotrack(bp, __func__);
512 
513 	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
514 		classified |= hook->func(hook->arg, bp);
515 
516 	if (!classified)
517 		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
518 }
519 
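/*
 * Illustrative sketch (not compiled): a classifier is a
 * g_classifier_hook handed to g_register_classifier().  The hook
 * below tags write bios; "example_classify" and the tag value are
 * hypothetical.
 */
#if 0
static int
example_classify(void *arg, struct bio *bp)
{

	if (bp->bio_cmd != BIO_WRITE)
		return (0);		/* not classified by us */
	bp->bio_classifier1 = arg;	/* store our verdict */
	return (1);			/* non-zero: field was updated */
}

static struct g_classifier_hook example_hook = {
	.func = example_classify,
	.arg = &example_hook,
};

/* ... error = g_register_classifier(&example_hook); ... */
#endif
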
520 void
521 g_io_request(struct bio *bp, struct g_consumer *cp)
522 {
523 	struct g_provider *pp;
524 	struct mtx *mtxp;
525 	int direct, error, first;
526 	uint8_t cmd;
527 
528 	biotrack(bp, __func__);
529 
530 	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
531 	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
532 	pp = cp->provider;
533 	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
534 #ifdef DIAGNOSTIC
535 	KASSERT(bp->bio_driver1 == NULL,
536 	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
537 	KASSERT(bp->bio_driver2 == NULL,
538 	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
539 	KASSERT(bp->bio_pflags == 0,
540 	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
541 	/*
542 	 * Remember consumer's private fields, so we can detect if they were
543 	 * modified by the provider.
544 	 */
545 	bp->_bio_caller1 = bp->bio_caller1;
546 	bp->_bio_caller2 = bp->bio_caller2;
547 	bp->_bio_cflags = bp->bio_cflags;
548 #endif
549 
550 	cmd = bp->bio_cmd;
551 	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
552 		KASSERT(bp->bio_data != NULL,
553 		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
554 	}
555 	if (cmd == BIO_DELETE || cmd == BIO_FLUSH) {
556 		KASSERT(bp->bio_data == NULL,
557 		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
558 		    bp->bio_cmd));
559 	}
560 	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
561 		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
562 		    ("wrong offset %jd for sectorsize %u",
563 		    bp->bio_offset, cp->provider->sectorsize));
564 		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
565 		    ("wrong length %jd for sectorsize %u",
566 		    bp->bio_length, cp->provider->sectorsize));
567 	}
568 
569 	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
570 	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);
571 
572 	bp->bio_from = cp;
573 	bp->bio_to = pp;
574 	bp->bio_error = 0;
575 	bp->bio_completed = 0;
576 
577 	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
578 	    ("Bio already on queue bp=%p", bp));
579 	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
580 	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
581 		binuptime(&bp->bio_t0);
582 	else
583 		getbinuptime(&bp->bio_t0);
584 
585 #ifdef GET_STACK_USAGE
586 	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
587 	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
588 	    !g_is_geom_thread(curthread) &&
589 	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
590 	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
591 	    pace == 0;
592 	if (direct) {
593 		/* Block direct execution if less than half of the stack is left. */
594 		size_t	st, su;
595 		GET_STACK_USAGE(st, su);
596 		if (su * 2 > st)
597 			direct = 0;
598 	}
599 #else
600 	direct = 0;
601 #endif
602 
603 	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
604 		g_bioq_lock(&g_bio_run_down);
605 		g_run_classifiers(bp);
606 		g_bioq_unlock(&g_bio_run_down);
607 	}
608 
609 	/*
610 	 * The statistics collection itself is lockless, but we
611 	 * cannot update one instance of the statistics from more
612 	 * than one thread at a time, so grab the lock first.
613 	 */
614 	mtxp = mtx_pool_find(mtxpool_sleep, pp);
615 	mtx_lock(mtxp);
616 	if (g_collectstats & G_STATS_PROVIDERS)
617 		devstat_start_transaction(pp->stat, &bp->bio_t0);
618 	if (g_collectstats & G_STATS_CONSUMERS)
619 		devstat_start_transaction(cp->stat, &bp->bio_t0);
620 	pp->nstart++;
621 	cp->nstart++;
622 	mtx_unlock(mtxp);
623 
624 	if (direct) {
625 		error = g_io_check(bp);
626 		if (error >= 0) {
627 			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
628 			    "provider %s returned %d", bp, bp->bio_to->name,
629 			    error);
630 			g_io_deliver(bp, error);
631 			return;
632 		}
633 		bp->bio_to->geom->start(bp);
634 	} else {
635 		g_bioq_lock(&g_bio_run_down);
636 		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
637 		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
638 		bp->bio_flags |= BIO_ONQUEUE;
639 		g_bio_run_down.bio_queue_length++;
640 		g_bioq_unlock(&g_bio_run_down);
641 		/* Pass it on down. */
642 		if (first)
643 			wakeup(&g_wait_down);
644 	}
645 }
646 
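/*
 * Illustrative sketch (not compiled): g_io_request() is
 * asynchronous.  Callers that cannot block in biowait(), the way
 * the helpers above do, set bio_done to a callback that runs when
 * the request completes.  The names below are hypothetical.
 */
#if 0
static void
example_read_done(struct bio *bp)
{

	if (bp->bio_error != 0)
		printf("example read failed: %d\n", bp->bio_error);
	/* ... otherwise consume bp->bio_data / bp->bio_completed ... */
	g_destroy_bio(bp);
}

static void
example_async_read(struct g_consumer *cp, void *buf, off_t off, off_t len)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = example_read_done;	/* runs from g_up or directly */
	bp->bio_offset = off;
	bp->bio_length = len;
	bp->bio_data = buf;
	g_io_request(bp, cp);
}
#endif
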
647 void
648 g_io_deliver(struct bio *bp, int error)
649 {
650 	struct bintime now;
651 	struct g_consumer *cp;
652 	struct g_provider *pp;
653 	struct mtx *mtxp;
654 	int direct, first;
655 
656 	biotrack(bp, __func__);
657 
658 	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
659 	pp = bp->bio_to;
660 	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
661 	cp = bp->bio_from;
662 	if (cp == NULL) {
663 		bp->bio_error = error;
664 		bp->bio_done(bp);
665 		return;
666 	}
667 	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
668 	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
669 #ifdef DIAGNOSTIC
670 	/*
671 	 * Some classes - GJournal in particular - can modify bio's
672 	 * private fields while the bio is in transit; the G_GEOM_VOLATILE_BIO
673 	 * flag marks this as expected behaviour for that particular geom.
674 	 */
675 	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
676 		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
677 		    ("bio_caller1 used by the provider %s", pp->name));
678 		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
679 		    ("bio_caller2 used by the provider %s", pp->name));
680 		KASSERT(bp->bio_cflags == bp->_bio_cflags,
681 		    ("bio_cflags used by the provider %s", pp->name));
682 	}
683 #endif
684 	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
685 	KASSERT(bp->bio_completed <= bp->bio_length,
686 	    ("bio_completed can't be greater than bio_length"));
687 
688 	g_trace(G_T_BIO,
689 "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
690 	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
691 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
692 
693 	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
694 	    ("Bio already on queue bp=%p", bp));
695 
696 	/*
697 	 * XXX: the next two don't belong here
698 	 */
699 	bp->bio_bcount = bp->bio_length;
700 	bp->bio_resid = bp->bio_bcount - bp->bio_completed;
701 
702 #ifdef GET_STACK_USAGE
703 	direct = (pp->flags & G_PF_DIRECT_SEND) &&
704 		 (cp->flags & G_CF_DIRECT_RECEIVE) &&
705 		 !g_is_geom_thread(curthread);
706 	if (direct) {
707 		/* Block direct execution if less than half of the stack is left. */
708 		size_t	st, su;
709 		GET_STACK_USAGE(st, su);
710 		if (su * 2 > st)
711 			direct = 0;
712 	}
713 #else
714 	direct = 0;
715 #endif
716 
717 	/*
718 	 * The statistics collection itself is lockless, but we
719 	 * cannot update one instance of the statistics from more
720 	 * than one thread at a time, so grab the lock first.
721 	 */
722 	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
723 	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
724 		binuptime(&now);
725 	mtxp = mtx_pool_find(mtxpool_sleep, cp);
726 	mtx_lock(mtxp);
727 	if (g_collectstats & G_STATS_PROVIDERS)
728 		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
729 	if (g_collectstats & G_STATS_CONSUMERS)
730 		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
731 	cp->nend++;
732 	pp->nend++;
733 	mtx_unlock(mtxp);
734 
735 	if (error != ENOMEM) {
736 		bp->bio_error = error;
737 		if (direct) {
738 			biodone(bp);
739 		} else {
740 			g_bioq_lock(&g_bio_run_up);
741 			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
742 			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
743 			bp->bio_flags |= BIO_ONQUEUE;
744 			g_bio_run_up.bio_queue_length++;
745 			g_bioq_unlock(&g_bio_run_up);
746 			if (first)
747 				wakeup(&g_wait_up);
748 		}
749 		return;
750 	}
751 
752 	if (bootverbose)
753 		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
754 	bp->bio_children = 0;
755 	bp->bio_inbed = 0;
756 	bp->bio_driver1 = NULL;
757 	bp->bio_driver2 = NULL;
758 	bp->bio_pflags = 0;
759 	g_io_request(bp, cp);
760 	pace = 1;
761 	return;
762 }
763 
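/*
 * Illustrative sketch (not compiled): g_io_deliver() is the
 * provider-side counterpart of g_io_request().  A provider's start
 * routine must eventually deliver every bio it is handed, either
 * from its completion path or immediately with an error, as this
 * hypothetical stub does for commands it does not implement.
 */
#if 0
static void
example_provider_start(struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		/* ... queue the transfer; g_io_deliver() on completion ... */
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		break;
	}
}
#endif
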
764 SYSCTL_DECL(_kern_geom);
765 
766 static long transient_maps;
767 SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
768     &transient_maps, 0,
769     "Total count of the transient mapping requests");
770 u_int transient_map_retries = 10;
771 SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
772     &transient_map_retries, 0,
773     "Max count of retries used before giving up on creating transient map");
774 int transient_map_hard_failures;
775 SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
776     &transient_map_hard_failures, 0,
777     "Failures to establish the transient mapping due to retry attempts "
778     "exhausted");
779 int transient_map_soft_failures;
780 SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
781     &transient_map_soft_failures, 0,
782     "Count of retried failures to establish the transient mapping");
783 int inflight_transient_maps;
784 SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
785     &inflight_transient_maps, 0,
786     "Current count of the active transient maps");
787 
788 static int
789 g_io_transient_map_bio(struct bio *bp)
790 {
791 	vm_offset_t addr;
792 	long size;
793 	u_int retried;
794 
795 	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));
796 
797 	size = round_page(bp->bio_ma_offset + bp->bio_length);
798 	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
799 	addr = 0;
800 	retried = 0;
801 	atomic_add_long(&transient_maps, 1);
802 retry:
803 	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
804 		if (transient_map_retries != 0 &&
805 		    retried >= transient_map_retries) {
806 			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
807 			    bp, bp->bio_to->name);
808 			atomic_add_int(&transient_map_hard_failures, 1);
809 			return (EDEADLK/* XXXKIB */);
810 		} else {
811 			/*
812 			 * Naive attempt to quiesce the I/O to get more
813 			 * in-flight requests completed and defragment
814 			 * the transient_arena.
815 			 */
816 			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
817 			    bp, bp->bio_to->name, retried);
818 			pause("g_d_tra", hz / 10);
819 			retried++;
820 			atomic_add_int(&transient_map_soft_failures, 1);
821 			goto retry;
822 		}
823 	}
824 	atomic_add_int(&inflight_transient_maps, 1);
825 	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
826 	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
827 	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
828 	bp->bio_flags &= ~BIO_UNMAPPED;
829 	return (EJUSTRETURN);
830 }
831 
832 void
833 g_io_schedule_down(struct thread *tp __unused)
834 {
835 	struct bio *bp;
836 	int error;
837 
838 	for(;;) {
839 		g_bioq_lock(&g_bio_run_down);
840 		bp = g_bioq_first(&g_bio_run_down);
841 		if (bp == NULL) {
842 			CTR0(KTR_GEOM, "g_down going to sleep");
843 			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
844 			    PRIBIO | PDROP, "-", 0);
845 			continue;
846 		}
847 		CTR0(KTR_GEOM, "g_down has work to do");
848 		g_bioq_unlock(&g_bio_run_down);
849 		biotrack(bp, __func__);
850 		if (pace != 0) {
851 			/*
852 			 * There has been at least one memory allocation
853 			 * failure since the last I/O completed. Pause 1ms to
854 			 * give the system a chance to free up memory. We only
855 			 * do this once because a large number of allocations
856 			 * can fail in the direct dispatch case and there's no
857 			 * relationship between the number of these failures and
858 			 * the length of the outage. If there's still an outage,
859 			 * we'll pause again and again until it's
860 			 * resolved. Older versions paused longer and once per
861 			 * allocation failure. This was OK for a single-threaded
862 			 * g_down, but with direct dispatch would lead to a max of
863 			 * 10 IOPS for minutes at a time when transient memory
864 			 * issues prevented allocation for a batch of requests
865 			 * from the upper layers.
866 			 *
867 			 * XXX This pacing is really lame. It needs to be solved
868 			 * by other methods. This is OK only because the worst
869 			 * case scenario is so rare. In the worst case scenario
870 			 * all memory is tied up waiting for I/O to complete
871 			 * which can never happen since we can't allocate bios
872 			 * for that I/O.
873 			 */
874 			CTR0(KTR_GEOM, "g_down pacing self");
875 			pause("g_down", min(hz/1000, 1));
876 			pace = 0;
877 		}
878 		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
879 		    bp->bio_to->name);
880 		error = g_io_check(bp);
881 		if (error >= 0) {
882 			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
883 			    "%s returned %d", bp, bp->bio_to->name, error);
884 			g_io_deliver(bp, error);
885 			continue;
886 		}
887 		THREAD_NO_SLEEPING();
888 		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
889 		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
890 		    bp->bio_length);
891 		bp->bio_to->geom->start(bp);
892 		THREAD_SLEEPING_OK();
893 	}
894 }
895 
896 void
897 g_io_schedule_up(struct thread *tp __unused)
898 {
899 	struct bio *bp;
900 
901 	for(;;) {
902 		g_bioq_lock(&g_bio_run_up);
903 		bp = g_bioq_first(&g_bio_run_up);
904 		if (bp == NULL) {
905 			CTR0(KTR_GEOM, "g_up going to sleep");
906 			msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
907 			    PRIBIO | PDROP, "-", 0);
908 			continue;
909 		}
910 		g_bioq_unlock(&g_bio_run_up);
911 		THREAD_NO_SLEEPING();
912 		CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
913 		    "%jd len %ld", bp, bp->bio_to->name,
914 		    bp->bio_offset, bp->bio_length);
915 		biodone(bp);
916 		THREAD_SLEEPING_OK();
917 	}
918 }
919 
920 void *
921 g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
922 {
923 	struct bio *bp;
924 	void *ptr;
925 	int errorc;
926 
927 	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
928 	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
929 	    (intmax_t)length));
930 
931 	bp = g_alloc_bio();
932 	bp->bio_cmd = BIO_READ;
933 	bp->bio_done = NULL;
934 	bp->bio_offset = offset;
935 	bp->bio_length = length;
936 	ptr = g_malloc(length, M_WAITOK);
937 	bp->bio_data = ptr;
938 	g_io_request(bp, cp);
939 	errorc = biowait(bp, "gread");
940 	if (error != NULL)
941 		*error = errorc;
942 	g_destroy_bio(bp);
943 	if (errorc) {
944 		g_free(ptr);
945 		ptr = NULL;
946 	}
947 	return (ptr);
948 }
949 
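/*
 * Illustrative sketch (not compiled): g_read_data() is the usual
 * way for a class to read metadata while tasting a provider.  The
 * buffer is allocated here on the caller's behalf and must be
 * released with g_free().  "example_taste" is a hypothetical name.
 */
#if 0
static int
example_taste(struct g_consumer *cp, struct g_provider *pp)
{
	u_char *buf;
	int error;

	/* Read the last sector, a common home for metadata. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize,
	    pp->sectorsize, &error);
	if (buf == NULL)
		return (error);
	/* ... parse the metadata in buf ... */
	g_free(buf);
	return (0);
}
#endif
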
950 int
951 g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
952 {
953 	struct bio *bp;
954 	int error;
955 
956 	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
957 	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
958 	    (intmax_t)length));
959 
960 	bp = g_alloc_bio();
961 	bp->bio_cmd = BIO_WRITE;
962 	bp->bio_done = NULL;
963 	bp->bio_offset = offset;
964 	bp->bio_length = length;
965 	bp->bio_data = ptr;
966 	g_io_request(bp, cp);
967 	error = biowait(bp, "gwrite");
968 	g_destroy_bio(bp);
969 	return (error);
970 }
971 
972 int
973 g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
974 {
975 	struct bio *bp;
976 	int error;
977 
978 	KASSERT(length > 0 && length >= cp->provider->sectorsize,
979 	    ("g_delete_data(): invalid length %jd", (intmax_t)length));
980 
981 	bp = g_alloc_bio();
982 	bp->bio_cmd = BIO_DELETE;
983 	bp->bio_done = NULL;
984 	bp->bio_offset = offset;
985 	bp->bio_length = length;
986 	bp->bio_data = NULL;
987 	g_io_request(bp, cp);
988 	error = biowait(bp, "gdelete");
989 	g_destroy_bio(bp);
990 	return (error);
991 }
992 
993 void
994 g_print_bio(struct bio *bp)
995 {
996 	const char *pname, *cmd = NULL;
997 
998 	if (bp->bio_to != NULL)
999 		pname = bp->bio_to->name;
1000 	else
1001 		pname = "[unknown]";
1002 
1003 	switch (bp->bio_cmd) {
1004 	case BIO_GETATTR:
1005 		cmd = "GETATTR";
1006 		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
1007 		return;
1008 	case BIO_FLUSH:
1009 		cmd = "FLUSH";
1010 		printf("%s[%s]", pname, cmd);
1011 		return;
1012 	case BIO_ZONE: {
1013 		char *subcmd = NULL;
1014 		cmd = "ZONE";
1015 		switch (bp->bio_zone.zone_cmd) {
1016 		case DISK_ZONE_OPEN:
1017 			subcmd = "OPEN";
1018 			break;
1019 		case DISK_ZONE_CLOSE:
1020 			subcmd = "CLOSE";
1021 			break;
1022 		case DISK_ZONE_FINISH:
1023 			subcmd = "FINISH";
1024 			break;
1025 		case DISK_ZONE_RWP:
1026 			subcmd = "RWP";
1027 			break;
1028 		case DISK_ZONE_REPORT_ZONES:
1029 			subcmd = "REPORT ZONES";
1030 			break;
1031 		case DISK_ZONE_GET_PARAMS:
1032 			subcmd = "GET PARAMS";
1033 			break;
1034 		default:
1035 			subcmd = "UNKNOWN";
1036 			break;
1037 		}
1038 		printf("%s[%s,%s]", pname, cmd, subcmd);
1039 		return;
1040 	}
1041 	case BIO_READ:
1042 		cmd = "READ";
1043 		break;
1044 	case BIO_WRITE:
1045 		cmd = "WRITE";
1046 		break;
1047 	case BIO_DELETE:
1048 		cmd = "DELETE";
1049 		break;
1050 	default:
1051 		cmd = "UNKNOWN";
1052 		printf("%s[%s()]", pname, cmd);
1053 		return;
1054 	}
1055 	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
1056 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
1057 }
1058