/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/stdint.h>
#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <signal.h>
#include <err.h>
#include <sched.h>
#else
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#endif

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

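/*
 * Requests travelling from consumers towards providers are queued on
 * g_bio_run_down; completed requests travelling back up are queued on
 * g_bio_run_up.  Freed bios are recycled through g_bio_idle.
 */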
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_idle;

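/*
 * "pace" is bumped whenever an ENOMEM completion is seen; the down-path
 * scheduler then backs off to give the system a chance to recover memory.
 */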
static u_int pace;

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	g_bioq_lock(bq);
	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	g_bioq_unlock(bq);
	return (bp);
}

static void
g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
{

	g_bioq_lock(rq);
	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
	rq->bio_queue_length++;
	g_bioq_unlock(rq);
}

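/*
 * Allocate a bio, preferably by recycling one from the idle queue.  The
 * fallback allocation uses M_NOWAIT, so this can return NULL.
 */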
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = g_bioq_first(&g_bio_idle);
	if (bp == NULL)
		bp = g_malloc(sizeof *bp, M_NOWAIT | M_ZERO);
	/* g_trace(G_T_BIO, "g_new_bio() = %p", bp); */
	return (bp);
}

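/*
 * Release a bio by clearing it and putting it on the idle queue for reuse.
 */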
void
g_destroy_bio(struct bio *bp)
{

	/* g_trace(G_T_BIO, "g_destroy_bio(%p)", bp); */
	bzero(bp, sizeof *bp);
	g_bioq_enqueue_tail(bp, &g_bio_idle);
}

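/*
 * Clone a bio so it can be passed on to the next level down.  The clone
 * records its parent in bio_linkage and bumps the parent's child count.
 */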
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = g_new_bio();
	if (bp2 != NULL) {
		bp2->bio_linkage = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;	/* XXX: atomic ? */
	}
	/* g_trace(G_T_BIO, "g_clone_bio(%p) = %p", bp, bp2); */
	return (bp2);
}

void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_idle);
}

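/*
 * Synchronous helper: issue a BIO_SETATTR request on the consumer and wait
 * for it to complete.
 */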
int
g_io_setattr(const char *attr, struct g_consumer *cp, int len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_setattr(%s)", attr);
	bp = g_new_bio();
	bp->bio_cmd = BIO_SETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gsetattr");
	g_destroy_bio(bp);
	return (error);
}

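/*
 * Synchronous helper: issue a BIO_GETATTR request and wait for it; on
 * return *len holds the number of bytes actually transferred.
 */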
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_new_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

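/*
 * Consumer entry point for I/O.  Sanity check the request against the
 * consumer's access counts and the provider's geometry, then queue it on
 * the down queue and wake up the thread sleeping on g_wait_down.
 */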
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	int error;
	off_t excess;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
	error = 0;
	bp->bio_from = cp;
	bp->bio_to = cp->provider;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	/* begin_stats(&bp->stats); */

	atomic_add_int(&cp->biocount, 1);
	/* Fail on unattached consumers. */
	if (bp->bio_to == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	/* Fail if the access counts do not allow this operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_SETATTR:
		/* XXX: Should ideally check for (cp->ace == 0) */
		if (cp->acw == 0) {
#ifdef DIAGNOSTIC
			printf("setattr on %s mode (%d,%d,%d)\n",
				cp->provider->name,
				cp->acr, cp->acw, cp->ace);
#endif
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	default:
		g_io_deliver(bp, EPERM);
		return;
	}
	/* If the provider is marked for error, don't disturb it. */
	if (bp->bio_to->error) {
		g_io_deliver(bp, bp->bio_to->error);
		return;
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Reject I/O not starting on a sector boundary. */
		if (bp->bio_offset % bp->bio_to->sectorsize) {
			g_io_deliver(bp, EINVAL);
			return;
		}
		/* Reject I/O not an integral number of sectors in length. */
		if (bp->bio_length % bp->bio_to->sectorsize) {
			g_io_deliver(bp, EINVAL);
			return;
		}
		/* Reject requests past the end of the media. */
		if (bp->bio_offset > bp->bio_to->mediasize) {
			g_io_deliver(bp, EIO);
			return;
		}
		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
		}
		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			g_io_deliver(bp, 0);
			return;
		}
		break;
	default:
		break;
	}
	/* Pass it on down. */
	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd);
	g_bioq_enqueue_tail(bp, &g_bio_run_down);
	wakeup(&g_wait_down);
}

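/*
 * Complete a request.  ENOMEM completions are retried by resubmitting the
 * request and nudging "pace"; everything else is queued on the up queue
 * for delivery back to the consumer.
 */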
void
g_io_deliver(struct bio *bp, int error)
{

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	KASSERT(bp->bio_from != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(bp->bio_from->geom != NULL,
	    ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_to != NULL, ("NULL bio_to in g_io_deliver"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
	/* finish_stats(&bp->stats); */

	if (error == ENOMEM) {
		printf("ENOMEM %p on %p(%s)\n",
			bp, bp->bio_to, bp->bio_to->name);
		g_io_request(bp, bp->bio_from);
		pace++;
		return;
	}

	bp->bio_error = error;

	g_bioq_enqueue_tail(bp, &g_bio_run_up);

	wakeup(&g_wait_up);
}

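/*
 * Work loop for the down-path thread: hand queued requests to the
 * destination geom's start method, backing off if "pace" is set.
 */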
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL)
			break;
		bp->bio_to->geom->start(bp);
		if (pace) {
			pace--;
			break;
		}
	}
}

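/*
 * Work loop for the up-path thread: drop the consumer's in-flight count
 * and complete the bio back to its issuer.
 */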
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
	struct g_consumer *cp;

	for (;;) {
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL)
			break;

		cp = bp->bio_from;

		atomic_add_int(&cp->biocount, -1);
		biodone(bp);
	}
}

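/*
 * Synchronous read convenience function.  Returns a g_malloc()ed buffer
 * holding the data on success; on error it returns NULL and, if "error"
 * is non-NULL, stores the error number there.
 */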
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	bp = g_new_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, 0);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

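/*
 * Synchronous write convenience function.  Returns the error status of
 * the completed request.
 */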
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	bp = g_new_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}