/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */


#include <sys/param.h>
#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <signal.h>
#include <err.h>
#include <sched.h>
#else
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#endif

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

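/*
 * Three bio queues drive all I/O in GEOM: requests travel down on
 * g_bio_run_down, completions travel back up on g_bio_run_up, and
 * g_bio_idle holds retired bios for reuse by g_new_bio().
 */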
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_idle;

#include <machine/atomic.h>

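/*
 * Thin wrappers around the per-queue mutex.
 */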
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

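/*
 * Initialize an empty queue and the mutex protecting it.
 */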
static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

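/*
 * Remove and return the first bio on the queue, or NULL if it is empty.
 */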
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	g_bioq_lock(bq);
	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	g_bioq_unlock(bq);
	return (bp);
}

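/*
 * Append a bio to the tail of a queue.
 */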
static void
g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
{

	g_bioq_lock(rq);
	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
	rq->bio_queue_length++;
	g_bioq_unlock(rq);
}

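/*
 * Get a bio, preferably by recycling one from the idle queue and only
 * falling back to g_malloc() when that queue is empty.
 */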
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = g_bioq_first(&g_bio_idle);
	if (bp == NULL)
		bp = g_malloc(sizeof *bp, M_WAITOK | M_ZERO);
	g_trace(G_T_BIO, "g_new_bio() = %p", bp);
	return (bp);
}

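/*
 * Retire a bio: zero it and park it on the idle queue for later reuse.
 */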
void
g_destroy_bio(struct bio *bp)
{

	g_trace(G_T_BIO, "g_destroy_bio(%p)", bp);
	bzero(bp, sizeof *bp);
	g_bioq_enqueue_tail(bp, &g_bio_idle);
}

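/*
 * Clone a bio for passing down to a lower layer.  The clone records the
 * original request in bio_linkage and copies the command, range, data
 * pointer and attribute.  A class's start method would typically do
 * roughly the following (sketch only, not part of this file):
 *
 *	bp2 = g_clone_bio(bp);
 *	bp2->bio_done = my_done_routine;
 *	g_io_request(bp2, cp);
 */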
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = g_new_bio();
	bp2->bio_linkage = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_attribute = bp->bio_attribute;
	g_trace(G_T_BIO, "g_clone_bio(%p) = %p", bp, bp2);
	return (bp2);
}

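/*
 * Initialize the three bio queues; called from GEOM startup.
 */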
void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_idle);
}

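/*
 * Issue a synchronous BIO_SETATTR request on the consumer and wait for
 * it to complete, polling every hz/10 ticks.  A transient EBUSY answer
 * causes the request to be retried after sleeping for hz ticks.
 */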
int
g_io_setattr(const char *attr, struct g_consumer *cp, int len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_setattr(%s)", attr);
	do {
		bp = g_new_bio();
		bp->bio_cmd = BIO_SETATTR;
		bp->bio_done = NULL;
		bp->bio_attribute = attr;
		bp->bio_length = len;
		bp->bio_data = ptr;
		g_io_request(bp, cp);
		while ((bp->bio_flags & BIO_DONE) == 0) {
			mtx_lock(&Giant);
			tsleep(bp, 0, "setattr", hz / 10);
			mtx_unlock(&Giant);
		}
		error = bp->bio_error;
		g_destroy_bio(bp);
		if (error == EBUSY)
			tsleep(&error, 0, "setattr_busy", hz);
	} while (error == EBUSY);
	return (error);
}


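/*
 * Synchronous BIO_GETATTR: same polling and EBUSY-retry scheme as
 * g_io_setattr(), with the attribute copied into *ptr and *len updated
 * to the number of bytes actually delivered.
 */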
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	do {
		bp = g_new_bio();
		bp->bio_cmd = BIO_GETATTR;
		bp->bio_done = NULL;
		bp->bio_attribute = attr;
		bp->bio_length = *len;
		bp->bio_data = ptr;
		g_io_request(bp, cp);
		while ((bp->bio_flags & BIO_DONE) == 0) {
			mtx_lock(&Giant);
			tsleep(bp, 0, "getattr", hz / 10);
			mtx_unlock(&Giant);
		}
		*len = bp->bio_completed;
		error = bp->bio_error;
		g_destroy_bio(bp);
		if (error == EBUSY)
			tsleep(&error, 0, "getattr_busy", hz);

	} while (error == EBUSY);
	return (error);
}

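/*
 * Fail a request without passing it further down: record the error and
 * deliver the bio straight back to the up path.
 */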
void
g_io_fail(struct bio *bp, int error)
{

	bp->bio_error = error;

	g_trace(G_T_BIO,
	    "bio_fail(%p) from %p(%s) to %p(%s) cmd %d error %d\n",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd, bp->bio_error);
	g_io_deliver(bp);
	return;
}

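/*
 * Entry point for the down path.  Sanity checks the request against the
 * consumer's access counts, clips reads/writes/deletes to the provider's
 * mediasize, and then queues the bio on g_bio_run_down for the down
 * thread (g_io_schedule_down()) to hand to the owning geom's start method.
 */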
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	int error;
	off_t excess;

	KASSERT(cp != NULL, ("bio_request on thin air"));
	error = 0;
	bp->bio_from = cp;
	bp->bio_to = cp->provider;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	/* begin_stats(&bp->stats); */

	atomic_add_int(&cp->biocount, 1);
	/* Fail on unattached consumers. */
	if (bp->bio_to == NULL)
		return (g_io_fail(bp, ENXIO));
	/* Fail if the access counts do not allow this operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (g_io_fail(bp, EPERM));
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0)
			return (g_io_fail(bp, EPERM));
		break;
	case BIO_SETATTR:
		if ((cp->acw == 0) || (cp->ace == 0))
			return (g_io_fail(bp, EPERM));
		break;
	default:
		return (g_io_fail(bp, EPERM));
	}
	/* If the provider is marked for error, don't disturb it. */
	if (bp->bio_to->error)
		return (g_io_fail(bp, bp->bio_to->error));
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Reject requests starting past the end of the media. */
		if (bp->bio_offset > bp->bio_to->mediasize)
			return (g_io_fail(bp, EIO));
		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
		}
		/* Deliver zero-length transfers right here. */
		if (bp->bio_length == 0)
			return (g_io_deliver(bp));
		break;
	default:
		break;
	}
	/* Pass it on down. */
	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd);
	g_bioq_enqueue_tail(bp, &g_bio_run_down);
	mtx_lock(&Giant);
	wakeup(&g_wait_down);
	mtx_unlock(&Giant);
}

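/*
 * Report completion of a bio: queue it on g_bio_run_up and wake the up
 * thread, which will run the bio_done method or wake any synchronous
 * waiter.
 */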
void
g_io_deliver(struct bio *bp)
{

	g_trace(G_T_BIO,
	    "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd, bp->bio_error);
	/* finish_stats(&bp->stats); */

	g_bioq_enqueue_tail(bp, &g_bio_run_up);

	mtx_lock(&Giant);
	wakeup(&g_wait_up);
	mtx_unlock(&Giant);
}

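/*
 * Down-path worker: drain g_bio_run_down and feed each bio to the
 * target geom's start method.
 */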
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL)
			break;
		bp->bio_to->geom->start(bp);
	}
}

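/*
 * Up-path worker: drain g_bio_run_up, mark each bio done, drop the
 * consumer's bio count and either call the bio_done method or wake a
 * synchronous waiter sleeping on the bio.
 */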
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
	struct g_consumer *cp;

	for (;;) {
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL)
			break;

		cp = bp->bio_from;

		bp->bio_flags |= BIO_DONE;
		atomic_add_int(&cp->biocount, -1);
		if (bp->bio_done != NULL) {
			bp->bio_done(bp);
		} else {
			mtx_lock(&Giant);
			wakeup(bp);
			mtx_unlock(&Giant);
		}
	}
}

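/*
 * Convenience routine: read "length" bytes at "offset" from a consumer
 * into a freshly g_malloc()'ed buffer and wait for the result, retrying
 * on EBUSY just like g_io_getattr().  Returns NULL and reports the error
 * through *error on failure; the caller must g_free() the buffer on
 * success.  Typical call (illustration only, not part of this file):
 *
 *	buf = g_read_data(cp, 0, 512, &error);
 *	if (buf != NULL) {
 *		... inspect the sector ...
 *		g_free(buf);
 *	}
 */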
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	do {
		bp = g_new_bio();
		bp->bio_cmd = BIO_READ;
		bp->bio_done = NULL;
		bp->bio_offset = offset;
		bp->bio_length = length;
		ptr = g_malloc(length, M_WAITOK);
		bp->bio_data = ptr;
		g_io_request(bp, cp);
		while ((bp->bio_flags & BIO_DONE) == 0) {
			mtx_lock(&Giant);
			tsleep(bp, 0, "g_read_data", hz / 10);
			mtx_unlock(&Giant);
		}
		errorc = bp->bio_error;
		if (error != NULL)
			*error = errorc;
		g_destroy_bio(bp);
		if (errorc) {
			g_free(ptr);
			ptr = NULL;
		}
		if (errorc == EBUSY)
			tsleep(&errorc, 0, "g_read_data_busy", hz);
	} while (errorc == EBUSY);
	return (ptr);
}
399