/* xref: /freebsd/sys/geom/geom_io.c (revision cc759c1995237364b02829feb9e5fdd1e6ed2c5b) */
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t	biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add entries to and remove entries from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}
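
/*
 * Illustrative sketch (kept under "#if 0", never compiled): the difference
 * between the two allocators above is that g_new_bio() uses M_NOWAIT and
 * may return NULL, while g_alloc_bio() uses M_WAITOK and always succeeds.
 * The function name below is hypothetical.
 */
#if 0
static int
example_bio_alloc(void)
{
	struct bio *bp;

	bp = g_new_bio();		/* may fail under memory pressure */
	if (bp == NULL)
		return (ENOMEM);
	/* ... fill in the request and hand it to g_io_request() ... */
	g_destroy_bio(bp);		/* the owner frees it when finished */
	return (0);
}
#endif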

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 *  BIO_ORDERED flag may be used by disk drivers to enforce
		 *  ordering restrictions, so this flag needs to be cloned.
		 *  BIO_UNMAPPED should be inherited, to properly indicate
		 *  which way the buffer is passed.
		 *  Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags & (BIO_ORDERED | BIO_UNMAPPED);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}
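
/*
 * Illustrative sketch (kept under "#if 0", never compiled): the usual way a
 * GEOM class start method uses g_clone_bio() is to clone the incoming
 * request, point the clone at its own consumer and let g_std_done()
 * propagate the completion back to the parent.  The function name and the
 * single-consumer assumption are hypothetical.
 */
#if 0
static void
example_start(struct bio *bp)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	cp = LIST_FIRST(&gp->consumer);
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;	/* completes the parent bio */
	g_io_request(cbp, cp);
}
#endif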

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & BIO_UNMAPPED;
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if the access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked with an error, don't disturb it. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize probably means no media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O that does not start on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O that is not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add a classifier to and remove it from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bios that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */

int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}
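
/*
 * Illustrative sketch (kept under "#if 0", never compiled): a minimal
 * classifier hook.  Per the comment above, a hook that stores a result in
 * bio_classifier1 or bio_classifier2 should return non-zero.  All names
 * below are hypothetical.
 */
#if 0
static int example_token;

static int
example_classify(void *arg __unused, struct bio *bp)
{

	if (bp->bio_cmd == BIO_READ) {
		bp->bio_classifier1 = &example_token;	/* non-NULL tag */
		return (1);		/* a field was updated */
	}
	return (0);
}

static struct g_classifier_hook example_hook = {
	.func = example_classify,
	.arg = NULL,
};

static void
example_classifier_attach(void)
{

	g_register_classifier(&example_hook);
	/* ... and later ... */
	g_unregister_classifier(&example_hook);
}
#endif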

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int first;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	if (g_collectstats)
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

	/*
	 * The statistics collection is lockless, but we cannot update
	 * one instance of the statistics from more than one thread at
	 * a time, so grab the lock first.
	 *
	 * We also use the lock to protect the list of classifiers.
	 */
	g_bioq_lock(&g_bio_run_down);

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1)
		g_run_classifiers(bp);

	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	if (first)
		wakeup(&g_wait_down);
}
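
/*
 * Illustrative sketch (kept under "#if 0", never compiled): g_io_request()
 * is asynchronous; a consumer that does not want to sleep in biowait() sets
 * bio_done and is called back from the g_up thread once g_io_deliver() has
 * queued the completed bio.  Offset and length must be multiples of the
 * provider's sector size, as the assertions above enforce.  All names are
 * hypothetical.
 */
#if 0
static void
example_read_done(struct bio *bp)
{

	if (bp->bio_error != 0)
		printf("example read failed: %d\n", bp->bio_error);
	g_destroy_bio(bp);
}

static void
example_async_read(struct g_consumer *cp, void *buf, off_t offset, off_t len)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = example_read_done;	/* called from the g_up thread */
	bp->bio_offset = offset;		/* sector-aligned */
	bp->bio_length = len;			/* multiple of the sector size */
	bp->bio_data = buf;
	g_io_request(bp, cp);			/* returns immediately */
}
#endif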

void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	int first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes, GJournal in particular, can modify a bio's private
	 * fields while the bio is in transit; the G_GEOM_VOLATILE_BIO flag
	 * means that this is expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection is lockless, but we cannot update
	 * one instance of the statistics from more than one thread at
	 * a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		if (first)
			wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");

static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			g_io_deliver(bp, EDEADLK/* XXXKIB */);
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (1);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (0);
}

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			pause("g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of the provider's media. */
			/*
			 * XXX: What if we truncate because the offset is
			 * bad, not the length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
				    round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
				    ("excess bio %p too short", bp));
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
					bp->bio_ma_n = round_page(
					    bp->bio_ma_offset +
					    bp->bio_length) / PAGE_SIZE;
				}
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if (g_io_transient_map_bio(bp))
				continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%jd len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}
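
/*
 * Illustrative sketch (kept under "#if 0", never compiled): g_read_data()
 * is the synchronous convenience wrapper typically used for metadata.  The
 * length must be at least one sector and at most MAXPHYS, and the buffer it
 * returns must be released with g_free().  The function name below is
 * hypothetical.
 */
#if 0
static void *
example_read_last_sector(struct g_consumer *cp, int *errp)
{
	struct g_provider *pp;

	pp = cp->provider;
	return (g_read_data(cp, pp->mediasize - pp->sectorsize,
	    pp->sectorsize, errp));	/* g_free() the result when done */
}
#endif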

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}
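
/*
 * Illustrative sketch (kept under "#if 0", never compiled): a completed
 * write may still sit in the provider's volatile write cache, so metadata
 * updates are commonly followed by g_io_flush().  The consumer is assumed
 * to be open for writing (acw > 0); the function name is hypothetical.
 */
#if 0
static int
example_write_sector(struct g_consumer *cp, off_t offset, void *buf)
{
	int error;

	error = g_write_data(cp, offset, buf, cp->provider->sectorsize);
	if (error != 0)
		return (error);
	return (g_io_flush(cp));	/* flush the provider's write cache */
}
#endif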

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}
915