/*-
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/cache/g_cache.h>

FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0, "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "Enable caching of read requests");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "Interval (in seconds) between cache maintenance runs");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "Time (in seconds) after which an unused entry is considered idle");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;

static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
	u_int val = *(u_int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	/* val is unsigned, so only the upper bound needs checking. */
	if (val > 100)
		return (EINVAL);
	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
		return (EINVAL);
	*(u_int *)arg1 = val;
	return (0);
}
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
	&g_cache_used_lo, 0, sysctl_handle_pct, "IU",
	"Low watermark of used entries (percent of cache size)");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
	&g_cache_used_hi, 0, sysctl_handle_pct, "IU",
	"High watermark of used entries (percent of cache size)");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};

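/*
 * Cache blocks are sc_bsize bytes, a power of two, so byte offsets and
 * block numbers convert by shifting by sc_bshift.
 */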
#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)

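/*
 * Allocate a cache entry.  Recycle the least recently used entry from
 * the used list when one is available; otherwise allocate a fresh
 * descriptor and data buffer, refusing (NULL) once the number of
 * entries exceeds sc_maxent.  Called with sc_mtx held.
 */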
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		dp->d_flags = 0;
		LIST_REMOVE(dp, d_next);
		return (dp);
	}
	if (sc->sc_nent > sc->sc_maxent) {
		sc->sc_cachefull++;
		return (NULL);
	}
	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
	if (dp == NULL)
		return (NULL);
	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
	if (dp->d_data == NULL) {
		free(dp, M_GCACHE);
		return (NULL);
	}
	sc->sc_nent++;
	return (dp);
}

static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}

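/*
 * Trim the used list down to the used_lo watermark (a percentage of
 * sc_maxent), freeing the oldest entries first.
 */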
static void
g_cache_free_used(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;
	u_int n;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	n = g_cache_used_lo * sc->sc_maxent / 100;
	while (sc->sc_nused > n) {
		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		LIST_REMOVE(dp, d_next);
		g_cache_free(sc, dp);
	}
}

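/*
 * Copy the portion of the cached block that overlaps bp into the bio's
 * buffer and complete the bio once all of its data has been gathered.
 * An entry that has been read up to its end is queued on (or moved to
 * the tail of) the used list, making it eligible for recycling.
 */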
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}

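/*
 * Completion handler for block reads issued by g_cache_read().  Deliver
 * the data to every bio waiting on the entry, then free the entry if it
 * was invalidated while the read was in flight or if the read failed.
 */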
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}

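/*
 * Look up a cache entry by block number in its hash bucket.
 */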
static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
		if (dp->d_bno == bno)
			return (dp);
	return (NULL);
}

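/*
 * Serve (part of) a read from the cache.  On a hit, deliver immediately
 * or join the entry's waiter list if the block is still being read; on
 * a miss, allocate an entry and issue a block-sized read to the backing
 * provider.  Returns ENOMEM if no entry or clone could be allocated.
 */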
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss.  Allocate entry and schedule bio. */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}

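/*
 * A write is passing through: drop every cache entry overlapping the
 * written range.  Entries with a read still in flight are only marked
 * invalid here and freed later in g_cache_done().
 */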
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}

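/*
 * Main I/O entry point.  Only reads that end below sc_tail and span at
 * most two cache blocks are served from the cache; writes invalidate
 * overlapping entries, and everything else (including cache misses that
 * could not be scheduled) is passed straight down to the provider.
 */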
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		g_cache_invalidate(sc, bp);
		break;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}

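/*
 * Periodic housekeeping, rescheduled every g_cache_timeout seconds.
 * Entries that have been idle for g_cache_idletime seconds are moved to
 * the used list, and the used list is trimmed once it grows past the
 * used_hi watermark.
 */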
static void
g_cache_go(void *arg)
{
	struct g_cache_softc *sc = arg;
	struct g_cache_desc *dp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/* Forcibly mark idle ready entries as used. */
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
			if (dp->d_flags & D_FLAG_USED ||
			    dp->d_biolist != NULL ||
			    time_uptime - dp->d_atime < g_cache_idletime)
				continue;
			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused++;
			dp->d_flags |= D_FLAG_USED;
		}
	}

	/* Keep the number of used entries low. */
	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
		g_cache_free_used(sc);

	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	error = g_access(cp, dr, dw, de);

	return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		if (strcmp(gp->name, name) == 0)
			return (gp->softc);
	}
	return (NULL);
}

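/*
 * Create a cache geom on top of provider pp.  Validates the requested
 * size and block size, sets up the softc, hash buckets, and a UMA zone
 * for block-sized buffers, and publishes the cache/<name> provider.
 * For automatic devices the last sector of pp holds the metadata and is
 * hidden from the new provider.
 */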
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_provider *newpp;
	struct g_consumer *cp;
	u_int bshift;
	int i;

	g_topology_assert();

	gp = NULL;
	newpp = NULL;
	cp = NULL;

	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

	/* The cache must hold at least 100 blocks. */
	if (md->md_size < 100) {
		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
		return (NULL);
	}

	/* Block size must be a power of two and a multiple of the sector size. */
	bshift = ffs(md->md_bsize) - 1;
	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
	    md->md_bsize != 1 << bshift ||
	    (md->md_bsize % pp->sectorsize) != 0) {
		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
		return (NULL);
	}

	/* Check for duplicate unit. */
	if (g_cache_find_device(mp, md->md_name) != NULL) {
		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
		return (NULL);
	}

	/* Do not pass the user-supplied name as a format string. */
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	sc->sc_type = type;
	sc->sc_bshift = bshift;
	sc->sc_bsize = 1 << bshift;
	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
	for (i = 0; i < G_CACHE_BUCKETS; i++)
		LIST_INIT(&sc->sc_desclist[i]);
	TAILQ_INIT(&sc->sc_usedlist);
	sc->sc_maxent = md->md_size;
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	gp->softc = sc;
	sc->sc_geom = gp;
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	gp->dumpconf = g_cache_dumpconf;

	newpp = g_new_providerf(gp, "cache/%s", gp->name);
	newpp->sectorsize = pp->sectorsize;
	newpp->mediasize = pp->mediasize;
	if (type == G_CACHE_TYPE_AUTOMATIC)
		newpp->mediasize -= pp->sectorsize;
	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

	cp = g_new_consumer(gp);
	if (g_attach(cp, pp) != 0) {
		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
		g_destroy_consumer(cp);
		g_destroy_provider(newpp);
		mtx_destroy(&sc->sc_mtx);
		/* Also release the zone created above; it would leak otherwise. */
		uma_zdestroy(sc->sc_zone);
		g_free(sc);
		g_destroy_geom(gp);
		return (NULL);
	}

	g_error_provider(newpp, 0);
	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
	return (gp);
}

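/*
 * Tear down a cache device, freeing all cache entries.  Unless force is
 * set, an open provider makes this fail with EBUSY.
 */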
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}

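/*
 * Read and decode the metadata stored in the provider's last sector.
 */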
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

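/*
 * Encode and write the metadata into the provider's last sector.
 */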
static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}

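/*
 * Taste callback: read the metadata from the last sector of pp and, if
 * the magic, version, and provider size all check out, auto-create the
 * cache device described there.
 */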
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_cache_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}

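/*
 * Handle "gcache create": build metadata from the request arguments and
 * create a manual (non-persistent) cache device.
 */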
static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	/* Compare as intmax_t so negative sizes are rejected, too. */
	if (*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	name = gctl_get_asciiparam(req, "arg1");
	if (name == NULL) {
		gctl_error(req, "No 'arg1' argument");
		return;
	}
	if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
		name += strlen("/dev/");
	pp = g_provider_by_name(name);
	if (pp == NULL) {
		G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
		gctl_error(req, "Provider %s is invalid.", name);
		return;
	}
	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}

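/*
 * Handle "gcache configure": update the in-core cache size and, for
 * automatic devices, write the new size and block size back to the
 * metadata sector (a block size change only takes effect on re-taste).
 */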
static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	/* Reject negative sizes before truncating to u_int. */
	if (*size < 0 || ((u_int)*size != 0 && (u_int)*size < 100)) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;
	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}

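/*
 * Handle "gcache destroy" (alias "stop") for one or more devices.
 */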
static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	int *nargs, *force, error, i;
	struct g_cache_softc *sc;
	const char *name;
	char param[16];

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No 'force' argument");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		error = g_cache_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

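/*
 * Handle "gcache reset": zero a device's statistics counters.
 */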
static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_softc *sc;
	const char *name;
	char param[16];
	int i, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		sc->sc_reads = 0;
		sc->sc_readbytes = 0;
		sc->sc_cachereads = 0;
		sc->sc_cachereadbytes = 0;
		sc->sc_cachehits = 0;
		sc->sc_cachemisses = 0;
		sc->sc_cachefull = 0;
		sc->sc_writes = 0;
		sc->sc_wrotebytes = 0;
	}
}

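/*
 * Dispatch control requests from the userland gcache(8) utility.
 */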
static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_CACHE_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "create") == 0) {
		g_cache_ctl_create(req, mp);
		return;
	} else if (strcmp(verb, "configure") == 0) {
		g_cache_ctl_configure(req, mp);
		return;
	} else if (strcmp(verb, "destroy") == 0 ||
	    strcmp(verb, "stop") == 0) {
		g_cache_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "reset") == 0) {
		g_cache_ctl_reset(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

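/*
 * Dump configuration and statistics as XML for the kern.geom.confxml tree.
 */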
static void
g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_cache_softc *sc;

	if (pp != NULL || cp != NULL)
		return;
	sc = gp->softc;
	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
	    (uintmax_t)sc->sc_tail);
	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
	    sc->sc_nused);
	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
	    sc->sc_invalid);
	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
	    sc->sc_readbytes);
	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
	    sc->sc_cachereads);
	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
	    sc->sc_cachereadbytes);
	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
	    sc->sc_cachehits);
	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
	    sc->sc_cachemisses);
	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
	    sc->sc_cachefull);
	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
	    sc->sc_wrotebytes);
}

DECLARE_GEOM_CLASS(g_cache_class, g_cache);