xref: /freebsd/sys/geom/geom_io.c (revision 145992504973bd16cf3518af9ba5ce185fefa82a)
1 /*-
2  * Copyright (c) 2002 Poul-Henning Kamp
3  * Copyright (c) 2002 Networks Associates Technology, Inc.
4  * All rights reserved.
5  *
6  * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7  * and NAI Labs, the Security Research Division of Network Associates, Inc.
8  * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9  * DARPA CHATS research program.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The names of the authors may not be used to endorse or promote
20  *    products derived from this software without specific prior written
21  *    permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/bio.h>
44 #include <sys/ktr.h>
45 #include <sys/proc.h>
46 #include <sys/stack.h>
47 
48 #include <sys/errno.h>
49 #include <geom/geom.h>
50 #include <geom/geom_int.h>
51 #include <sys/devicestat.h>
52 
53 #include <vm/uma.h>
54 
55 static struct g_bioq g_bio_run_down;
56 static struct g_bioq g_bio_run_up;
57 static struct g_bioq g_bio_run_task;
58 
59 static u_int pace;
60 static uma_zone_t	biozone;
61 
62 /*
63  * The head of the list of classifiers used in g_io_request.
64  * Use g_register_classifier() and g_unregister_classifier()
65  * to add/remove entries to the list.
66  * Classifiers are invoked in registration order.
67  */
68 static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
69     g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);
70 
71 #include <machine/atomic.h>
72 
73 static void
74 g_bioq_lock(struct g_bioq *bq)
75 {
76 
77 	mtx_lock(&bq->bio_queue_lock);
78 }
79 
80 static void
81 g_bioq_unlock(struct g_bioq *bq)
82 {
83 
84 	mtx_unlock(&bq->bio_queue_lock);
85 }
86 
87 #if 0
88 static void
89 g_bioq_destroy(struct g_bioq *bq)
90 {
91 
92 	mtx_destroy(&bq->bio_queue_lock);
93 }
94 #endif
95 
96 static void
97 g_bioq_init(struct g_bioq *bq)
98 {
99 
100 	TAILQ_INIT(&bq->bio_queue);
101 	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
102 }
103 
104 static struct bio *
105 g_bioq_first(struct g_bioq *bq)
106 {
107 	struct bio *bp;
108 
109 	bp = TAILQ_FIRST(&bq->bio_queue);
110 	if (bp != NULL) {
111 		KASSERT((bp->bio_flags & BIO_ONQUEUE),
112 		    ("Bio not on queue bp=%p target %p", bp, bq));
113 		bp->bio_flags &= ~BIO_ONQUEUE;
114 		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
115 		bq->bio_queue_length--;
116 	}
117 	return (bp);
118 }
119 
120 struct bio *
121 g_new_bio(void)
122 {
123 	struct bio *bp;
124 
125 	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
126 #ifdef KTR
127 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
128 		struct stack st;
129 
130 		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
131 		stack_save(&st);
132 		CTRSTACK(KTR_GEOM, &st, 3, 0);
133 	}
134 #endif
135 	return (bp);
136 }
137 
138 struct bio *
139 g_alloc_bio(void)
140 {
141 	struct bio *bp;
142 
143 	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
144 #ifdef KTR
145 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
146 		struct stack st;
147 
148 		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
149 		stack_save(&st);
150 		CTRSTACK(KTR_GEOM, &st, 3, 0);
151 	}
152 #endif
153 	return (bp);
154 }
155 
156 void
157 g_destroy_bio(struct bio *bp)
158 {
159 #ifdef KTR
160 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
161 		struct stack st;
162 
163 		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
164 		stack_save(&st);
165 		CTRSTACK(KTR_GEOM, &st, 3, 0);
166 	}
167 #endif
168 	uma_zfree(biozone, bp);
169 }
170 
171 struct bio *
172 g_clone_bio(struct bio *bp)
173 {
174 	struct bio *bp2;
175 
176 	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
177 	if (bp2 != NULL) {
178 		bp2->bio_parent = bp;
179 		bp2->bio_cmd = bp->bio_cmd;
180 		/*
181 		 *  BIO_ORDERED flag may be used by disk drivers to enforce
182 		 *  ordering restrictions, so this flag needs to be cloned.
183 		 *  Other bio flags are not suitable for cloning.
184 		 */
185 		bp2->bio_flags = bp->bio_flags & BIO_ORDERED;
186 		bp2->bio_length = bp->bio_length;
187 		bp2->bio_offset = bp->bio_offset;
188 		bp2->bio_data = bp->bio_data;
189 		bp2->bio_attribute = bp->bio_attribute;
190 		/* Inherit classification info from the parent */
191 		bp2->bio_classifier1 = bp->bio_classifier1;
192 		bp2->bio_classifier2 = bp->bio_classifier2;
193 		bp->bio_children++;
194 	}
195 #ifdef KTR
196 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
197 		struct stack st;
198 
199 		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
200 		stack_save(&st);
201 		CTRSTACK(KTR_GEOM, &st, 3, 0);
202 	}
203 #endif
204 	return(bp2);
205 }
206 
207 struct bio *
208 g_duplicate_bio(struct bio *bp)
209 {
210 	struct bio *bp2;
211 
212 	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
213 	bp2->bio_parent = bp;
214 	bp2->bio_cmd = bp->bio_cmd;
215 	bp2->bio_length = bp->bio_length;
216 	bp2->bio_offset = bp->bio_offset;
217 	bp2->bio_data = bp->bio_data;
218 	bp2->bio_attribute = bp->bio_attribute;
219 	bp->bio_children++;
220 #ifdef KTR
221 	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
222 		struct stack st;
223 
224 		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
225 		stack_save(&st);
226 		CTRSTACK(KTR_GEOM, &st, 3, 0);
227 	}
228 #endif
229 	return(bp2);
230 }
231 
232 void
233 g_io_init()
234 {
235 
236 	g_bioq_init(&g_bio_run_down);
237 	g_bioq_init(&g_bio_run_up);
238 	g_bioq_init(&g_bio_run_task);
239 	biozone = uma_zcreate("g_bio", sizeof (struct bio),
240 	    NULL, NULL,
241 	    NULL, NULL,
242 	    0, 0);
243 }
244 
245 int
246 g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
247 {
248 	struct bio *bp;
249 	int error;
250 
251 	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
252 	bp = g_alloc_bio();
253 	bp->bio_cmd = BIO_GETATTR;
254 	bp->bio_done = NULL;
255 	bp->bio_attribute = attr;
256 	bp->bio_length = *len;
257 	bp->bio_data = ptr;
258 	g_io_request(bp, cp);
259 	error = biowait(bp, "ggetattr");
260 	*len = bp->bio_completed;
261 	g_destroy_bio(bp);
262 	return (error);
263 }
264 
265 int
266 g_io_flush(struct g_consumer *cp)
267 {
268 	struct bio *bp;
269 	int error;
270 
271 	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
272 	bp = g_alloc_bio();
273 	bp->bio_cmd = BIO_FLUSH;
274 	bp->bio_flags |= BIO_ORDERED;
275 	bp->bio_done = NULL;
276 	bp->bio_attribute = NULL;
277 	bp->bio_offset = cp->provider->mediasize;
278 	bp->bio_length = 0;
279 	bp->bio_data = NULL;
280 	g_io_request(bp, cp);
281 	error = biowait(bp, "gflush");
282 	g_destroy_bio(bp);
283 	return (error);
284 }
285 
286 static int
287 g_io_check(struct bio *bp)
288 {
289 	struct g_consumer *cp;
290 	struct g_provider *pp;
291 
292 	cp = bp->bio_from;
293 	pp = bp->bio_to;
294 
295 	/* Fail if access counters dont allow the operation */
296 	switch(bp->bio_cmd) {
297 	case BIO_READ:
298 	case BIO_GETATTR:
299 		if (cp->acr == 0)
300 			return (EPERM);
301 		break;
302 	case BIO_WRITE:
303 	case BIO_DELETE:
304 	case BIO_FLUSH:
305 		if (cp->acw == 0)
306 			return (EPERM);
307 		break;
308 	default:
309 		return (EPERM);
310 	}
311 	/* if provider is marked for error, don't disturb. */
312 	if (pp->error)
313 		return (pp->error);
314 	if (cp->flags & G_CF_ORPHAN)
315 		return (ENXIO);
316 
317 	switch(bp->bio_cmd) {
318 	case BIO_READ:
319 	case BIO_WRITE:
320 	case BIO_DELETE:
321 		/* Zero sectorsize or mediasize is probably a lack of media. */
322 		if (pp->sectorsize == 0 || pp->mediasize == 0)
323 			return (ENXIO);
324 		/* Reject I/O not on sector boundary */
325 		if (bp->bio_offset % pp->sectorsize)
326 			return (EINVAL);
327 		/* Reject I/O not integral sector long */
328 		if (bp->bio_length % pp->sectorsize)
329 			return (EINVAL);
330 		/* Reject requests before or past the end of media. */
331 		if (bp->bio_offset < 0)
332 			return (EIO);
333 		if (bp->bio_offset > pp->mediasize)
334 			return (EIO);
335 		break;
336 	default:
337 		break;
338 	}
339 	return (0);
340 }
341 
342 /*
343  * bio classification support.
344  *
345  * g_register_classifier() and g_unregister_classifier()
346  * are used to add/remove a classifier from the list.
347  * The list is protected using the g_bio_run_down lock,
348  * because the classifiers are called in this path.
349  *
350  * g_io_request() passes bio's that are not already classified
351  * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
352  * Classifiers can store their result in the two fields
353  * bio_classifier1 and bio_classifier2.
354  * A classifier that updates one of the fields should
355  * return a non-zero value.
356  * If no classifier updates the field, g_run_classifiers() sets
357  * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
358  */
359 
360 int
361 g_register_classifier(struct g_classifier_hook *hook)
362 {
363 
364 	g_bioq_lock(&g_bio_run_down);
365 	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
366 	g_bioq_unlock(&g_bio_run_down);
367 
368 	return (0);
369 }
370 
371 void
372 g_unregister_classifier(struct g_classifier_hook *hook)
373 {
374 	struct g_classifier_hook *entry;
375 
376 	g_bioq_lock(&g_bio_run_down);
377 	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
378 		if (entry == hook) {
379 			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
380 			break;
381 		}
382 	}
383 	g_bioq_unlock(&g_bio_run_down);
384 }
385 
386 static void
387 g_run_classifiers(struct bio *bp)
388 {
389 	struct g_classifier_hook *hook;
390 	int classified = 0;
391 
392 	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
393 		classified |= hook->func(hook->arg, bp);
394 
395 	if (!classified)
396 		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
397 }
398 
399 void
400 g_io_request(struct bio *bp, struct g_consumer *cp)
401 {
402 	struct g_provider *pp;
403 	int first;
404 
405 	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
406 	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
407 	pp = cp->provider;
408 	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
409 #ifdef DIAGNOSTIC
410 	KASSERT(bp->bio_driver1 == NULL,
411 	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
412 	KASSERT(bp->bio_driver2 == NULL,
413 	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
414 	KASSERT(bp->bio_pflags == 0,
415 	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
416 	/*
417 	 * Remember consumer's private fields, so we can detect if they were
418 	 * modified by the provider.
419 	 */
420 	bp->_bio_caller1 = bp->bio_caller1;
421 	bp->_bio_caller2 = bp->bio_caller2;
422 	bp->_bio_cflags = bp->bio_cflags;
423 #endif
424 
425 	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
426 		KASSERT(bp->bio_data != NULL,
427 		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
428 	}
429 	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
430 		KASSERT(bp->bio_data == NULL,
431 		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
432 		    bp->bio_cmd));
433 	}
434 	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
435 		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
436 		    ("wrong offset %jd for sectorsize %u",
437 		    bp->bio_offset, cp->provider->sectorsize));
438 		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
439 		    ("wrong length %jd for sectorsize %u",
440 		    bp->bio_length, cp->provider->sectorsize));
441 	}
442 
443 	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
444 	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);
445 
446 	bp->bio_from = cp;
447 	bp->bio_to = pp;
448 	bp->bio_error = 0;
449 	bp->bio_completed = 0;
450 
451 	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
452 	    ("Bio already on queue bp=%p", bp));
453 	bp->bio_flags |= BIO_ONQUEUE;
454 
455 	if (g_collectstats)
456 		binuptime(&bp->bio_t0);
457 	else
458 		getbinuptime(&bp->bio_t0);
459 
460 	/*
461 	 * The statistics collection is lockless, as such, but we
462 	 * can not update one instance of the statistics from more
463 	 * than one thread at a time, so grab the lock first.
464 	 *
465 	 * We also use the lock to protect the list of classifiers.
466 	 */
467 	g_bioq_lock(&g_bio_run_down);
468 
469 	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1)
470 		g_run_classifiers(bp);
471 
472 	if (g_collectstats & 1)
473 		devstat_start_transaction(pp->stat, &bp->bio_t0);
474 	if (g_collectstats & 2)
475 		devstat_start_transaction(cp->stat, &bp->bio_t0);
476 
477 	pp->nstart++;
478 	cp->nstart++;
479 	first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
480 	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
481 	g_bio_run_down.bio_queue_length++;
482 	g_bioq_unlock(&g_bio_run_down);
483 
484 	/* Pass it on down. */
485 	if (first)
486 		wakeup(&g_wait_down);
487 }
488 
489 void
490 g_io_deliver(struct bio *bp, int error)
491 {
492 	struct g_consumer *cp;
493 	struct g_provider *pp;
494 	int first;
495 
496 	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
497 	pp = bp->bio_to;
498 	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
499 	cp = bp->bio_from;
500 	if (cp == NULL) {
501 		bp->bio_error = error;
502 		bp->bio_done(bp);
503 		return;
504 	}
505 	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
506 	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
507 #ifdef DIAGNOSTIC
508 	/*
509 	 * Some classes - GJournal in particular - can modify bio's
510 	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
511 	 * flag means it's an expected behaviour for that particular geom.
512 	 */
513 	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
514 		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
515 		    ("bio_caller1 used by the provider %s", pp->name));
516 		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
517 		    ("bio_caller2 used by the provider %s", pp->name));
518 		KASSERT(bp->bio_cflags == bp->_bio_cflags,
519 		    ("bio_cflags used by the provider %s", pp->name));
520 	}
521 #endif
522 	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
523 	KASSERT(bp->bio_completed <= bp->bio_length,
524 	    ("bio_completed can't be greater than bio_length"));
525 
526 	g_trace(G_T_BIO,
527 "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
528 	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
529 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
530 
531 	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
532 	    ("Bio already on queue bp=%p", bp));
533 
534 	/*
535 	 * XXX: next two doesn't belong here
536 	 */
537 	bp->bio_bcount = bp->bio_length;
538 	bp->bio_resid = bp->bio_bcount - bp->bio_completed;
539 
540 	/*
541 	 * The statistics collection is lockless, as such, but we
542 	 * can not update one instance of the statistics from more
543 	 * than one thread at a time, so grab the lock first.
544 	 */
545 	g_bioq_lock(&g_bio_run_up);
546 	if (g_collectstats & 1)
547 		devstat_end_transaction_bio(pp->stat, bp);
548 	if (g_collectstats & 2)
549 		devstat_end_transaction_bio(cp->stat, bp);
550 
551 	cp->nend++;
552 	pp->nend++;
553 	if (error != ENOMEM) {
554 		bp->bio_error = error;
555 		first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
556 		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
557 		bp->bio_flags |= BIO_ONQUEUE;
558 		g_bio_run_up.bio_queue_length++;
559 		g_bioq_unlock(&g_bio_run_up);
560 		if (first)
561 			wakeup(&g_wait_up);
562 		return;
563 	}
564 	g_bioq_unlock(&g_bio_run_up);
565 
566 	if (bootverbose)
567 		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
568 	bp->bio_children = 0;
569 	bp->bio_inbed = 0;
570 	g_io_request(bp, cp);
571 	pace++;
572 	return;
573 }
574 
575 void
576 g_io_schedule_down(struct thread *tp __unused)
577 {
578 	struct bio *bp;
579 	off_t excess;
580 	int error;
581 
582 	for(;;) {
583 		g_bioq_lock(&g_bio_run_down);
584 		bp = g_bioq_first(&g_bio_run_down);
585 		if (bp == NULL) {
586 			CTR0(KTR_GEOM, "g_down going to sleep");
587 			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
588 			    PRIBIO | PDROP, "-", 0);
589 			continue;
590 		}
591 		CTR0(KTR_GEOM, "g_down has work to do");
592 		g_bioq_unlock(&g_bio_run_down);
593 		if (pace > 0) {
594 			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
595 			pause("g_down", hz/10);
596 			pace--;
597 		}
598 		error = g_io_check(bp);
599 		if (error) {
600 			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
601 			    "%s returned %d", bp, bp->bio_to->name, error);
602 			g_io_deliver(bp, error);
603 			continue;
604 		}
605 		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
606 		    bp->bio_to->name);
607 		switch (bp->bio_cmd) {
608 		case BIO_READ:
609 		case BIO_WRITE:
610 		case BIO_DELETE:
611 			/* Truncate requests to the end of providers media. */
612 			/*
613 			 * XXX: What if we truncate because of offset being
614 			 * bad, not length?
615 			 */
616 			excess = bp->bio_offset + bp->bio_length;
617 			if (excess > bp->bio_to->mediasize) {
618 				excess -= bp->bio_to->mediasize;
619 				bp->bio_length -= excess;
620 				if (excess > 0)
621 					CTR3(KTR_GEOM, "g_down truncated bio "
622 					    "%p provider %s by %d", bp,
623 					    bp->bio_to->name, excess);
624 			}
625 			/* Deliver zero length transfers right here. */
626 			if (bp->bio_length == 0) {
627 				g_io_deliver(bp, 0);
628 				CTR2(KTR_GEOM, "g_down terminated 0-length "
629 				    "bp %p provider %s", bp, bp->bio_to->name);
630 				continue;
631 			}
632 			break;
633 		default:
634 			break;
635 		}
636 		THREAD_NO_SLEEPING();
637 		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
638 		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
639 		    bp->bio_length);
640 		bp->bio_to->geom->start(bp);
641 		THREAD_SLEEPING_OK();
642 	}
643 }
644 
645 void
646 bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
647 {
648 	bp->bio_task = func;
649 	bp->bio_task_arg = arg;
650 	/*
651 	 * The taskqueue is actually just a second queue off the "up"
652 	 * queue, so we use the same lock.
653 	 */
654 	g_bioq_lock(&g_bio_run_up);
655 	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
656 	    ("Bio already on queue bp=%p target taskq", bp));
657 	bp->bio_flags |= BIO_ONQUEUE;
658 	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
659 	g_bio_run_task.bio_queue_length++;
660 	wakeup(&g_wait_up);
661 	g_bioq_unlock(&g_bio_run_up);
662 }
663 
664 
665 void
666 g_io_schedule_up(struct thread *tp __unused)
667 {
668 	struct bio *bp;
669 	for(;;) {
670 		g_bioq_lock(&g_bio_run_up);
671 		bp = g_bioq_first(&g_bio_run_task);
672 		if (bp != NULL) {
673 			g_bioq_unlock(&g_bio_run_up);
674 			THREAD_NO_SLEEPING();
675 			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
676 			bp->bio_task(bp->bio_task_arg);
677 			THREAD_SLEEPING_OK();
678 			continue;
679 		}
680 		bp = g_bioq_first(&g_bio_run_up);
681 		if (bp != NULL) {
682 			g_bioq_unlock(&g_bio_run_up);
683 			THREAD_NO_SLEEPING();
684 			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
685 			    "%jd len %ld", bp, bp->bio_to->name,
686 			    bp->bio_offset, bp->bio_length);
687 			biodone(bp);
688 			THREAD_SLEEPING_OK();
689 			continue;
690 		}
691 		CTR0(KTR_GEOM, "g_up going to sleep");
692 		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
693 		    PRIBIO | PDROP, "-", 0);
694 	}
695 }
696 
697 void *
698 g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
699 {
700 	struct bio *bp;
701 	void *ptr;
702 	int errorc;
703 
704 	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
705 	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
706 	    (intmax_t)length));
707 
708 	bp = g_alloc_bio();
709 	bp->bio_cmd = BIO_READ;
710 	bp->bio_done = NULL;
711 	bp->bio_offset = offset;
712 	bp->bio_length = length;
713 	ptr = g_malloc(length, M_WAITOK);
714 	bp->bio_data = ptr;
715 	g_io_request(bp, cp);
716 	errorc = biowait(bp, "gread");
717 	if (error != NULL)
718 		*error = errorc;
719 	g_destroy_bio(bp);
720 	if (errorc) {
721 		g_free(ptr);
722 		ptr = NULL;
723 	}
724 	return (ptr);
725 }
726 
727 int
728 g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
729 {
730 	struct bio *bp;
731 	int error;
732 
733 	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
734 	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
735 	    (intmax_t)length));
736 
737 	bp = g_alloc_bio();
738 	bp->bio_cmd = BIO_WRITE;
739 	bp->bio_done = NULL;
740 	bp->bio_offset = offset;
741 	bp->bio_length = length;
742 	bp->bio_data = ptr;
743 	g_io_request(bp, cp);
744 	error = biowait(bp, "gwrite");
745 	g_destroy_bio(bp);
746 	return (error);
747 }
748 
749 int
750 g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
751 {
752 	struct bio *bp;
753 	int error;
754 
755 	KASSERT(length > 0 && length >= cp->provider->sectorsize,
756 	    ("g_delete_data(): invalid length %jd", (intmax_t)length));
757 
758 	bp = g_alloc_bio();
759 	bp->bio_cmd = BIO_DELETE;
760 	bp->bio_done = NULL;
761 	bp->bio_offset = offset;
762 	bp->bio_length = length;
763 	bp->bio_data = NULL;
764 	g_io_request(bp, cp);
765 	error = biowait(bp, "gdelete");
766 	g_destroy_bio(bp);
767 	return (error);
768 }
769 
770 void
771 g_print_bio(struct bio *bp)
772 {
773 	const char *pname, *cmd = NULL;
774 
775 	if (bp->bio_to != NULL)
776 		pname = bp->bio_to->name;
777 	else
778 		pname = "[unknown]";
779 
780 	switch (bp->bio_cmd) {
781 	case BIO_GETATTR:
782 		cmd = "GETATTR";
783 		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
784 		return;
785 	case BIO_FLUSH:
786 		cmd = "FLUSH";
787 		printf("%s[%s]", pname, cmd);
788 		return;
789 	case BIO_READ:
790 		cmd = "READ";
791 		break;
792 	case BIO_WRITE:
793 		cmd = "WRITE";
794 		break;
795 	case BIO_DELETE:
796 		cmd = "DELETE";
797 		break;
798 	default:
799 		cmd = "UNKNOWN";
800 		printf("%s[%s()]", pname, cmd);
801 		return;
802 	}
803 	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
804 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
805 }
806