xref: /freebsd/sys/geom/geom_io.c (revision 0fa02ea5f786ef02befd46f8f083f48c8cd9630b)
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

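/*
 * Requests travel between the up and down kernel threads on these two
 * queues.  "pace" is bumped when a request fails with ENOMEM so that the
 * down thread backs off briefly, and "biozone" is the UMA zone from which
 * all struct bio allocations in this file are made.
 */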
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;

static u_int pace;
static uma_zone_t	biozone;

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

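/*
 * Remove and return the first bio on the queue, or NULL if it is empty.
 * The caller is expected to hold the queue lock; g_bioq_enqueue_tail()
 * below takes the lock itself before appending.
 */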
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

static void
g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
{

	g_bioq_lock(rq);
	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
	rq->bio_queue_length++;
	g_bioq_unlock(rq);
}

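/*
 * Allocate a zeroed bio from the UMA zone.  The allocation uses M_NOWAIT,
 * so NULL may be returned under memory pressure.
 */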
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{

	uma_zfree(biozone, bp);
}

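/*
 * Clone a bio for passing a request further down the stack.  The clone
 * shares the parent's data buffer and attribute, records the parent in
 * bio_parent and bumps the parent's bio_children count.  Like g_new_bio(),
 * this uses M_NOWAIT and may return NULL.
 */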
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
	return (bp2);
}

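/*
 * Set up the two request queues and create the UMA zone for struct bio.
 */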
void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

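/*
 * Issue a synchronous BIO_GETATTR request and wait for it to complete.
 * On return, *len holds the number of bytes actually delivered.
 */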
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_new_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

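/*
 * Sanity-check a request before it is handed to the provider: verify that
 * the consumer's access counters permit the operation, that the provider
 * is not in an error state, and that the offset and length of data
 * transfers are sector-aligned and within the media.
 */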
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if the access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* A zero sectorsize most likely means there is no media. */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not starting on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O that is not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

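/*
 * Accept a request from a consumer: stamp it with the consumer/provider
 * pair, start the devstat transactions if statistics collection is
 * enabled, queue it on the down queue and wake the down thread.
 */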
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	if (g_collectstats) {
		devstat_start_transaction_bio(cp->stat, bp);
		devstat_start_transaction_bio(pp->stat, bp);
	}
	cp->nstart++;
	pp->nstart++;

	/* Pass it on down. */
	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);
	g_bioq_enqueue_tail(bp, &g_bio_run_down);
	wakeup(&g_wait_down);
}

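/*
 * Complete a request back towards its consumer.  Requests without a
 * consumer are finished directly via bio_done.  ENOMEM failures are
 * retried by re-issuing the request and pacing the down thread; anything
 * else is queued on the up queue for delivery by the up thread.
 */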
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	bp->bio_bcount = bp->bio_length;
	if (g_collectstats) {
		bp->bio_resid = bp->bio_bcount - bp->bio_completed;
		devstat_end_transaction_bio(cp->stat, bp);
		devstat_end_transaction_bio(pp->stat, bp);
	}
	cp->nend++;
	pp->nend++;

	if (error == ENOMEM) {
		if (bootverbose)
			printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
		g_io_request(bp, cp);
		pace++;
		return;
	}
	bp->bio_error = error;
	g_bioq_enqueue_tail(bp, &g_bio_run_up);
	wakeup(&g_wait_up);
}

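/*
 * Main loop of the "g_down" kernel thread: pull requests off the down
 * queue, validate them with g_io_check(), clip transfers that extend past
 * the end of the media, and hand the rest to the provider's geom start
 * routine.  A small private mutex is held across the start call, seemingly
 * so that a start routine which sleeps trips the kernel's
 * sleeping-with-mutex-held checks.
 */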
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;
	struct mtx mymutex;

	bzero(&mymutex, sizeof mymutex);
	mtx_init(&mymutex, "g_xdown", NULL, MTX_DEF);

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			msleep(&error, NULL, PRIBIO, "g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			g_io_deliver(bp, error);
			continue;
		}
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of the provider's media. */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
			}
			/* Deliver zero-length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				continue;
			}
			break;
		default:
			break;
		}
		mtx_lock(&mymutex);
		bp->bio_to->geom->start(bp);
		mtx_unlock(&mymutex);
	}
}

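/*
 * Main loop of the "g_up" kernel thread: pull completed requests off the
 * up queue and finish them with biodone(), again holding a small private
 * mutex across the call.
 */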
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
	struct mtx mymutex;

	bzero(&mymutex, sizeof mymutex);
	mtx_init(&mymutex, "g_xup", NULL, MTX_DEF);
	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			mtx_lock(&mymutex);
			biodone(bp);
			mtx_unlock(&mymutex);
			continue;
		}
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}

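/*
 * Convenience wrapper for a synchronous read.  A buffer of the requested
 * length is allocated, a BIO_READ is issued and waited on; on failure the
 * buffer is freed and NULL is returned, with the error optionally passed
 * back through *error.
 */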
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length >= 512 && length <= DFLTPHYS,
		("g_read_data(): invalid length %jd", (intmax_t)length));

	bp = g_new_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

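/*
 * Convenience wrapper for a synchronous write of the caller's buffer;
 * returns the error from biowait().
 */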
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length >= 512 && length <= DFLTPHYS,
		("g_write_data(): invalid length %jd", (intmax_t)length));

	bp = g_new_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}