xref: /freebsd/sys/geom/geom_io.c (revision 963e8efffe4ab97233102e0e25f95061b6fefbe3)
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <machine/atomic.h>

#include <vm/uma.h>

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t	biozone;

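/*
 * Helpers for the global bio queues; each queue is protected by its
 * own mutex, taken and released through these wrappers.
 */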
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

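/*
 * Allocate a new bio from the UMA zone.  The allocation is M_NOWAIT,
 * so this can fail and return NULL under memory pressure.
 */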
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

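/*
 * Same as g_new_bio(), but sleeping (M_WAITOK), so it cannot fail.
 */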
struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

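/*
 * Return a bio to the UMA zone.
 */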
void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

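/*
 * Clone a bio for passing a request on towards our provider: the
 * clone shares the parent's data buffer and the parent's child count
 * is bumped.  The allocation is M_NOWAIT, so a NULL return must be
 * handled by the caller.
 */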
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

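/*
 * Same as g_clone_bio(), but sleeping (M_WAITOK), so it cannot fail.
 */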
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

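/*
 * Initialize the three bio queues and create the UMA zone used by
 * g_new_bio() and friends.
 */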
void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}

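/*
 * Issue a synchronous BIO_GETATTR request and wait for completion.
 * On return, *len holds the number of bytes actually transferred.
 */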
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

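/*
 * Issue a synchronous BIO_FLUSH request and wait for completion.
 */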
int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

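/*
 * Sanity-check a bio before it is sent to the provider: verify the
 * consumer's access counters, the provider's error state, and that
 * the request is properly aligned and within the media.
 */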
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if the access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* A zero sectorsize probably means a lack of media. */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

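/*
 * Accept a bio from a consumer, start statistics collection and put
 * the request on the down queue for the g_down thread to process.
 */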
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember the consumer's private fields, so we can detect if they
	 * were modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->bio_data in g_io_request"));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	binuptime(&bp->bio_t0);

	/*
	 * The statistics collection is lockless as such, but we cannot
	 * update one instance of the statistics from more than one
	 * thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_down);
	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	wakeup(&g_wait_down);
}

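/*
 * Report completion of a bio back towards the consumer: finish the
 * statistics and put the bio on the up queue, except for ENOMEM,
 * where the request is resubmitted (with pacing) instead.
 */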
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_caller1 == bp->_bio_caller1,
	    ("bio_caller1 used by the provider %s", pp->name));
	KASSERT(bp->bio_caller2 == bp->_bio_caller2,
	    ("bio_caller2 used by the provider %s", pp->name));
	KASSERT(bp->bio_cflags == bp->_bio_cflags,
	    ("bio_cflags used by the provider %s", pp->name));
#endif
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: The next two don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection is lockless as such, but we cannot
	 * update one instance of the statistics from more than one
	 * thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

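/*
 * Main loop of the g_down thread: pull requests off the down queue,
 * validate them with g_io_check(), truncate them to the media size
 * and hand them to the provider's start method.  The pace counter
 * throttles resubmissions after ENOMEM.
 */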
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			msleep(&error, NULL, PRIBIO, "g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/*
			 * Truncate requests to the end of the provider's
			 * media.
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero-length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

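/*
 * Defer a call of "func(arg)" to the g_up thread via the task queue.
 */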
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{

	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

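/*
 * Main loop of the g_up thread: task queue entries take priority over
 * completed bios awaiting biodone().
 */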
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%ld len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}

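/*
 * Synchronously read "length" bytes at "offset" into a freshly
 * allocated buffer.  Returns NULL and sets *error (if non-NULL) on
 * failure; on success the caller must g_free() the result.
 */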
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

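/*
 * Synchronously write "length" bytes from "ptr" at "offset".
 */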
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

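/*
 * Print a one-line, human-readable description of a bio.
 */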
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		/* FALLTHROUGH */
	case BIO_WRITE:
		if (cmd == NULL)
			cmd = "WRITE";
		/* FALLTHROUGH */
	case BIO_DELETE:
		if (cmd == NULL)
			cmd = "DELETE";
		printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
		    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
		return;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	/* NOTREACHED */
}