/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static int	g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve. When pacing, we also turn
 * off direct dispatch to reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressure persists. See g_io_schedule_down() for more details
 * and limitations.
 */
static volatile u_int pace;

static uma_zone_t	biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add and remove entries from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}
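
/*
 * Usage sketch (illustrative, not called anywhere in this file):
 * g_new_bio() allocates with M_NOWAIT and may return NULL under
 * memory pressure, so callers must check the result; g_alloc_bio()
 * uses M_WAITOK, never returns NULL, but may sleep and therefore
 * must not be used in non-sleepable contexts.
 *
 *	struct bio *bp;
 *
 *	bp = g_new_bio();
 *	if (bp == NULL)
 *		return (ENOMEM);
 */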

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 * The BIO_ORDERED flag may be used by disk drivers to enforce
		 * ordering restrictions, so this flag needs to be cloned.
		 * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
		 * indicate which way the buffer is passed.
		 * Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		/* Inherit classification info from the parent. */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}
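
/*
 * Sketch of the canonical clone-and-forward pattern used by a GEOM
 * class's start method (g_example_start is a hypothetical name; the
 * consumer lookup assumes a geom with a single consumer):
 *
 *	static void
 *	g_example_start(struct bio *bp)
 *	{
 *		struct bio *cbp;
 *
 *		cbp = g_clone_bio(bp);
 *		if (cbp == NULL) {
 *			g_io_deliver(bp, ENOMEM);
 *			return;
 *		}
 *		cbp->bio_done = g_std_done;
 *		g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
 *	}
 */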

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}
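
/*
 * Note: unlike g_clone_bio(), g_duplicate_bio() allocates with
 * M_WAITOK (so it cannot fail, but may sleep) and does not inherit
 * BIO_ORDERED or the classification fields from the parent.
 */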

void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}
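
/*
 * Illustrative use of g_io_getattr(); "GEOM::ident" is one of the
 * conventional attribute names, and the buffer size chosen here is
 * an assumption:
 *
 *	char ident[64];
 *	int len = sizeof(ident);
 *
 *	error = g_io_getattr("GEOM::ident", cp, &len, ident);
 */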

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

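/*
 * Validate and, if necessary, adjust a bio before it is passed down
 * the stack.  Returns EJUSTRETURN (a negative value) when the request
 * should proceed to the provider, and any value >= 0 when the request
 * must instead be completed immediately via g_io_deliver() with that
 * value as the error.
 */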
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add and remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bio's that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */

int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}
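
/*
 * Sketch of a classifier registration (the classifier logic and all
 * names here are hypothetical; a real hook must stay allocated until
 * it is unregistered):
 *
 *	static int
 *	example_classify(void *arg, struct bio *bp)
 *	{
 *
 *		if (bp->bio_cmd == BIO_READ) {
 *			bp->bio_classifier1 = arg;
 *			return (1);
 *		}
 *		return (0);
 *	}
 *
 *	static struct g_classifier_hook example_hook = {
 *		.func = example_classify,
 *		.arg = &example_tag,
 *	};
 *
 *	g_register_classifier(&example_hook);
 */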

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, error, first;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

#ifdef GET_STACK_USAGE
	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    !g_is_geom_thread(curthread) &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of the stack is left. */
		size_t	st, su;

		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	if (!TAILQ_EMPTY(&g_classifier_tailq) && bp->bio_classifier1 == NULL) {
		g_bioq_lock(&g_bio_run_down);
		g_run_classifiers(bp);
		g_bioq_unlock(&g_bio_run_down);
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction(cp->stat, &bp->bio_t0);
	pp->nstart++;
	cp->nstart++;
	mtx_unlock(mtxp);

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}

void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; the
	 * G_GEOM_VOLATILE_BIO flag means this is expected behaviour
	 * for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: The next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

#ifdef GET_STACK_USAGE
	direct = (pp->flags & G_PF_DIRECT_SEND) &&
		 (cp->flags & G_CF_DIRECT_RECEIVE) &&
		 !g_is_geom_thread(curthread);
	if (direct) {
		/* Block direct execution if less than half of the stack is left. */
		size_t	st, su;

		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, cp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
	cp->nend++;
	pp->nend++;
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

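	/*
	 * ENOMEM from a provider means it could not allocate resources
	 * for the request; reset the bio's state, re-issue it, and set
	 * the pace hint so g_down backs off (see g_io_schedule_down()).
	 */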
	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of active transient maps");

static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
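	/*
	 * Worked example (numbers are illustrative): with
	 * bio_ma_offset = 512 and bio_length = 4096 on a 4 KB page
	 * system, size is round_page(4608) = 8192, i.e. the two
	 * pages that bio_ma must describe.
	 */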
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed. Pause 1ms to
			 * give the system a chance to free up memory. We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures and
			 * the length of the outage. If there's still an outage,
			 * we'll pause again and again until it's
			 * resolved. Older versions paused longer and once per
			 * allocation failure. This was OK for a single threaded
			 * g_down, but with direct dispatch would lead to a
			 * maximum of 10 IOPs for minutes at a time when
			 * transient memory issues prevented allocation for a
			 * batch of requests from the upper layers.
			 *
			 * XXX This pacing is really lame. It needs to be solved
			 * by other methods. This is OK only because the worst
			 * case scenario is so rare. In the worst case scenario
			 * all memory is tied up waiting for I/O to complete
			 * which can never happen since we can't allocate bios
			 * for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{

	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%jd len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}
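
/*
 * Typical use of g_read_data() (illustrative), e.g. while tasting a
 * provider; cp is assumed to be an open consumer attached to the
 * provider pp:
 *
 *	u_char *buf;
 *	int error;
 *
 *	buf = g_read_data(cp, 0, pp->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	...
 *	g_free(buf);
 */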

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}
1006