xref: /freebsd/sys/geom/geom_io.c (revision c17d43407fe04133a94055b0dbc7ea8965654a9f)
1 /*-
2  * Copyright (c) 2002 Poul-Henning Kamp
3  * Copyright (c) 2002 Networks Associates Technology, Inc.
4  * All rights reserved.
5  *
6  * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7  * and NAI Labs, the Security Research Division of Network Associates, Inc.
8  * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9  * DARPA CHATS research program.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The names of the authors may not be used to endorse or promote
20  *    products derived from this software without specific prior written
21  *    permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * $FreeBSD$
36  */
37 
38 
39 #include <sys/param.h>
40 #ifndef _KERNEL
41 #include <stdio.h>
42 #include <string.h>
43 #include <stdlib.h>
44 #include <signal.h>
45 #include <err.h>
46 #include <sched.h>
47 #else
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/bio.h>
52 #endif
53 
54 #include <sys/errno.h>
55 #include <geom/geom.h>
56 
57 static struct g_bioq g_bio_run_down;
58 static struct g_bioq g_bio_run_up;
59 static struct g_bioq g_bio_idle;
60 
61 #include <machine/atomic.h>
62 
/*
 * Acquire the mutex protecting a bio queue's list and length counter.
 */
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}
69 
/*
 * Release the mutex protecting a bio queue.
 */
static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}
76 
#if 0
/*
 * Tear down a bio queue's mutex.  Currently compiled out (no caller);
 * kept for symmetry with g_bioq_init().
 */
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif
85 
/*
 * Initialize a bio queue: an empty tail queue plus the default mutex
 * that guards it.
 */
static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", MTX_DEF);
}
93 
94 static struct bio *
95 g_bioq_first(struct g_bioq *bq)
96 {
97 	struct bio *bp;
98 
99 	g_bioq_lock(bq);
100 	bp = TAILQ_FIRST(&bq->bio_queue);
101 	if (bp != NULL) {
102 		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
103 		bq->bio_queue_length--;
104 	}
105 	g_bioq_unlock(bq);
106 	return (bp);
107 }
108 
109 static void
110 g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
111 {
112 
113 	g_bioq_lock(rq);
114 	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
115 	rq->bio_queue_length++;
116 	g_bioq_unlock(rq);
117 }
118 
119 struct bio *
120 g_new_bio(void)
121 {
122 	struct bio *bp;
123 
124 	bp = g_bioq_first(&g_bio_idle);
125 	if (bp == NULL)
126 		bp = g_malloc(sizeof *bp, M_WAITOK | M_ZERO);
127 	g_trace(G_T_BIO, "g_new_bio() = %p", bp);
128 	return (bp);
129 }
130 
/*
 * Release a bio back to the idle queue for reuse by g_new_bio().
 * The structure is zeroed first so a recycled bio comes back in the
 * same state as a fresh M_ZERO allocation; the bzero() must precede
 * the enqueue since it would otherwise wipe the queue linkage.
 */
void
g_destroy_bio(struct bio *bp)
{

	g_trace(G_T_BIO, "g_destroy_bio(%p)", bp);
	bzero(bp, sizeof *bp);
	g_bioq_enqueue_tail(bp, &g_bio_idle);
}
139 
140 struct bio *
141 g_clone_bio(struct bio *bp)
142 {
143 	struct bio *bp2;
144 
145 	bp2 = g_new_bio();
146 	bp2->bio_linkage = bp;
147 	bp2->bio_cmd = bp->bio_cmd;
148 	bp2->bio_length = bp->bio_length;
149 	bp2->bio_offset = bp->bio_offset;
150 	bp2->bio_data = bp->bio_data;
151 	bp2->bio_attribute = bp->bio_attribute;
152 	g_trace(G_T_BIO, "g_clone_bio(%p) = %p", bp, bp2);
153 	return(bp2);
154 }
155 
156 void
157 g_io_init()
158 {
159 
160 	g_bioq_init(&g_bio_run_down);
161 	g_bioq_init(&g_bio_run_up);
162 	g_bioq_init(&g_bio_idle);
163 }
164 
/*
 * Issue a synchronous BIO_SETATTR request on a consumer: write `len'
 * bytes from `ptr' as the value of attribute `attr'.  Sleeps until the
 * request completes and retries for as long as the provider reports
 * EBUSY.  Returns the bio error code (0 on success).
 */
int
g_io_setattr(char *attr, struct g_consumer *cp, int len, void *ptr, struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_setattr(%s)", attr);
	do {
		bp = g_new_bio();
		bp->bio_cmd = BIO_SETATTR;
		bp->bio_done = NULL;	/* no callback: we poll and get wakeup(bp) */
		bp->bio_attribute = attr;
		bp->bio_length = len;
		bp->bio_data = ptr;
		g_io_request(bp, cp);
		/*
		 * Poll for completion; g_io_schedule_up() sets BIO_DONE
		 * and wakeup()s the bio because bio_done is NULL.  The
		 * hz/10 timeout guards against a missed wakeup.
		 */
		while ((bp->bio_flags & BIO_DONE) == 0) {
			mtx_lock(&Giant);
			tsleep(bp, 0, "setattr", hz / 10);
			mtx_unlock(&Giant);
		}
		error = bp->bio_error;
		g_destroy_bio(bp);
		/* Provider busy: back off for a second, then retry. */
		if (error == EBUSY)
			tsleep(&error, 0, "setattr_busy", hz);
	} while(error == EBUSY);
	return (error);
}
192 
193 
194 int
195 g_io_getattr(char *attr, struct g_consumer *cp, int *len, void *ptr, struct thread *tp __unused)
196 {
197 	struct bio *bp;
198 	int error;
199 
200 	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
201 	do {
202 		bp = g_new_bio();
203 		bp->bio_cmd = BIO_GETATTR;
204 		bp->bio_done = NULL;
205 		bp->bio_attribute = attr;
206 		bp->bio_length = *len;
207 		bp->bio_data = ptr;
208 		g_io_request(bp, cp);
209 		while ((bp->bio_flags & BIO_DONE) == 0) {
210 			mtx_lock(&Giant);
211 			tsleep(bp, 0, "getattr", hz / 10);
212 			mtx_unlock(&Giant);
213 		}
214 		*len = bp->bio_completed;
215 		error = bp->bio_error;
216 		g_destroy_bio(bp);
217 		if (error == EBUSY)
218 			tsleep(&error, 0, "getattr_busy", hz);
219 
220 	} while(error == EBUSY);
221 	return (error);
222 }
223 
224 void
225 g_io_request(struct bio *bp, struct g_consumer *cp)
226 {
227 	int error;
228 
229 	KASSERT(cp != NULL, ("bio_request on thin air"));
230 	error = 0;
231 	bp->bio_from = cp;
232 	bp->bio_to = cp->provider;
233 
234 	/* begin_stats(&bp->stats); */
235 
236 	atomic_add_int(&cp->biocount, 1);
237 	if (bp->bio_to == NULL)
238 		error = ENXIO;
239 	if (!error) {
240 		switch(bp->bio_cmd) {
241 		case BIO_READ:
242 		case BIO_GETATTR:
243 			if (cp->acr == 0)
244 				error = EPERM;
245 			break;
246 		case BIO_WRITE:
247 			if (cp->acw == 0)
248 				error = EPERM;
249 			break;
250 		case BIO_SETATTR:
251 		case BIO_DELETE:
252 		case BIO_FORMAT:
253 			if ((cp->acw == 0) || (cp->ace == 0))
254 				error = EPERM;
255 			break;
256 		default:
257 			error = EPERM;
258 			break;
259 		}
260 	}
261 	/* if provider is marked for error, don't disturb */
262 	if (!error)
263 		error = bp->bio_to->error;
264 	if (error) {
265 		bp->bio_error = error;
266 		/* finish_stats(&bp->stats); */
267 
268 		g_trace(G_T_BIO,
269 		    "bio_request(%p) from %p(%s) to %p(%s) cmd %d error %d\n",
270 		    bp, bp->bio_from, bp->bio_from->geom->name,
271 		    bp->bio_to, bp->bio_to->name, bp->bio_cmd, bp->bio_error);
272 		g_bioq_enqueue_tail(bp, &g_bio_run_up);
273 		mtx_lock(&Giant);
274 		wakeup(&g_wait_up);
275 		mtx_unlock(&Giant);
276 	} else {
277 		g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
278 		    bp, bp->bio_from, bp->bio_from->geom->name,
279 		    bp->bio_to, bp->bio_to->name, bp->bio_cmd);
280 		g_bioq_enqueue_tail(bp, &g_bio_run_down);
281 		mtx_lock(&Giant);
282 		wakeup(&g_wait_down);
283 		mtx_unlock(&Giant);
284 	}
285 }
286 
/*
 * Deliver a completed request back toward its consumer: queue it on
 * the up queue and wake the up-schedule thread, which will mark it
 * BIO_DONE and invoke bio_done (or wakeup a synchronous waiter).
 */
void
g_io_deliver(struct bio *bp)
{

	g_trace(G_T_BIO,
	    "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd, bp->bio_error);
	/* finish_stats(&bp->stats); */

	g_bioq_enqueue_tail(bp, &g_bio_run_up);

	mtx_lock(&Giant);
	wakeup(&g_wait_up);
	mtx_unlock(&Giant);
}
303 
304 void
305 g_io_schedule_down(struct thread *tp __unused)
306 {
307 	struct bio *bp;
308 
309 	for(;;) {
310 		bp = g_bioq_first(&g_bio_run_down);
311 		if (bp == NULL)
312 			break;
313 		bp->bio_to->geom->start(bp);
314 	}
315 }
316 
317 void
318 g_io_schedule_up(struct thread *tp __unused)
319 {
320 	struct bio *bp;
321 	struct g_consumer *cp;
322 
323 	for(;;) {
324 		bp = g_bioq_first(&g_bio_run_up);
325 		if (bp == NULL)
326 			break;
327 
328 		cp = bp->bio_from;
329 
330 		bp->bio_flags |= BIO_DONE;
331 		atomic_add_int(&cp->biocount, -1);
332 		if (bp->bio_done != NULL) {
333 			bp->bio_done(bp);
334 		} else {
335 			mtx_lock(&Giant);
336 			wakeup(bp);
337 			mtx_unlock(&Giant);
338 		}
339 	}
340 }
341 
342 void *
343 g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
344 {
345 	struct bio *bp;
346 	void *ptr;
347 	int errorc;
348 
349         do {
350 		bp = g_new_bio();
351 		bp->bio_cmd = BIO_READ;
352 		bp->bio_done = NULL;
353 		bp->bio_offset = offset;
354 		bp->bio_length = length;
355 		ptr = g_malloc(length, M_WAITOK);
356 		bp->bio_data = ptr;
357 		g_io_request(bp, cp);
358 		while ((bp->bio_flags & BIO_DONE) == 0) {
359 			mtx_lock(&Giant);
360 			tsleep(bp, 0, "g_read_data", hz / 10);
361 			mtx_unlock(&Giant);
362 		}
363 		errorc = bp->bio_error;
364 		if (error != NULL)
365 			*error = errorc;
366 		g_destroy_bio(bp);
367 		if (errorc) {
368 			g_free(ptr);
369 			ptr = NULL;
370 		}
371 		if (errorc == EBUSY)
372 			tsleep(&errorc, 0, "g_read_data_busy", hz);
373         } while (errorc == EBUSY);
374 	return (ptr);
375 }
376