xref: /freebsd/sys/geom/cache/g_cache.c (revision 6b7b2d80ed4d728d3ffd12c422e57798c1b63a84)
1 /*-
2  * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/bio.h>
37 #include <sys/sysctl.h>
38 #include <sys/malloc.h>
39 #include <sys/queue.h>
40 #include <sys/sbuf.h>
41 #include <sys/time.h>
42 #include <vm/uma.h>
43 #include <geom/geom.h>
44 #include <geom/cache/g_cache.h>
45 
46 FEATURE(geom_cache, "GEOM cache module");
47 
48 static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");
49 
50 SYSCTL_DECL(_kern_geom);
51 static SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0,
52     "GEOM_CACHE stuff");
53 static u_int g_cache_debug = 0;
54 SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
55     "Debug level");
56 static u_int g_cache_enable = 1;
57 SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
58     "");
59 static u_int g_cache_timeout = 10;
60 SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
61     0, "");
62 static u_int g_cache_idletime = 5;
63 SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
64     0, "");
65 static u_int g_cache_used_lo = 5;
66 static u_int g_cache_used_hi = 20;
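
/*
 * Sysctl handler shared by the used_lo and used_hi watermarks: only accept
 * values in the 0..100 range and keep used_lo <= used_hi at all times.
 */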
67 static int
68 sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
69 {
70 	u_int val = *(u_int *)arg1;
71 	int error;
72 
73 	error = sysctl_handle_int(oidp, &val, 0, req);
74 	if (error || !req->newptr)
75 		return (error);
76 	if (val > 100)
77 		return (EINVAL);
78 	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
79 	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
80 		return (EINVAL);
81 	*(u_int *)arg1 = val;
82 	return (0);
83 }
84 SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
85 	&g_cache_used_lo, 0, sysctl_handle_pct, "IU", "");
86 SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
87 	&g_cache_used_hi, 0, sysctl_handle_pct, "IU", "");
88 
89 
90 static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
91 static g_ctl_destroy_geom_t g_cache_destroy_geom;
92 
93 static g_taste_t g_cache_taste;
94 static g_ctl_req_t g_cache_config;
95 static g_dumpconf_t g_cache_dumpconf;
96 
97 struct g_class g_cache_class = {
98 	.name = G_CACHE_CLASS_NAME,
99 	.version = G_VERSION,
100 	.ctlreq = g_cache_config,
101 	.taste = g_cache_taste,
102 	.destroy_geom = g_cache_destroy_geom
103 };
104 
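/* Convert between byte offsets and cache block numbers. */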
105 #define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
106 #define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)
107 
108 
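/*
 * Allocate a block descriptor: recycle the oldest entry on the used list if
 * one is available, otherwise allocate a fresh descriptor and data buffer
 * unless the cache is already full.  Called with sc_mtx held.
 */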
109 static struct g_cache_desc *
110 g_cache_alloc(struct g_cache_softc *sc)
111 {
112 	struct g_cache_desc *dp;
113 
114 	mtx_assert(&sc->sc_mtx, MA_OWNED);
115 
116 	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
117 		dp = TAILQ_FIRST(&sc->sc_usedlist);
118 		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
119 		sc->sc_nused--;
120 		dp->d_flags = 0;
121 		LIST_REMOVE(dp, d_next);
122 		return (dp);
123 	}
124 	if (sc->sc_nent > sc->sc_maxent) {
125 		sc->sc_cachefull++;
126 		return (NULL);
127 	}
128 	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
129 	if (dp == NULL)
130 		return (NULL);
131 	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
132 	if (dp->d_data == NULL) {
133 		free(dp, M_GCACHE);
134 		return (NULL);
135 	}
136 	sc->sc_nent++;
137 	return (dp);
138 }
139 
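/* Release a descriptor and its data buffer.  Called with sc_mtx held. */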
140 static void
141 g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
142 {
143 
144 	mtx_assert(&sc->sc_mtx, MA_OWNED);
145 
146 	uma_zfree(sc->sc_zone, dp->d_data);
147 	free(dp, M_GCACHE);
148 	sc->sc_nent--;
149 }
150 
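/*
 * Free the oldest entries on the used list until it shrinks to the used_lo
 * watermark.  Called with sc_mtx held.
 */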
151 static void
152 g_cache_free_used(struct g_cache_softc *sc)
153 {
154 	struct g_cache_desc *dp;
155 	u_int n;
156 
157 	mtx_assert(&sc->sc_mtx, MA_OWNED);
158 
159 	n = g_cache_used_lo * sc->sc_maxent / 100;
160 	while (sc->sc_nused > n) {
161 		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
162 		dp = TAILQ_FIRST(&sc->sc_usedlist);
163 		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
164 		sc->sc_nused--;
165 		LIST_REMOVE(dp, d_next);
166 		g_cache_free(sc, dp);
167 	}
168 }
169 
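/*
 * Copy the part of the request covered by this cache block into the bio and
 * complete the bio once all of its data has been delivered.  The entry's
 * access time is updated and, once it has been read up to its end, it is
 * queued at the tail of the used (reclaimable) list.
 */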
170 static void
171 g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
172     struct g_cache_desc *dp, int error)
173 {
174 	off_t off1, off, len;
175 
176 	mtx_assert(&sc->sc_mtx, MA_OWNED);
177 	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
178 	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
179 	    dp->d_bno, ("wrong entry"));
180 
181 	off1 = BNO2OFF(dp->d_bno, sc);
182 	off = MAX(bp->bio_offset, off1);
183 	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;
184 
185 	if (bp->bio_error == 0)
186 		bp->bio_error = error;
187 	if (bp->bio_error == 0) {
188 		bcopy(dp->d_data + (off - off1),
189 		    bp->bio_data + (off - bp->bio_offset), len);
190 	}
191 	bp->bio_completed += len;
192 	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
193 	if (bp->bio_completed == bp->bio_length) {
194 		if (bp->bio_error != 0)
195 			bp->bio_completed = 0;
196 		g_io_deliver(bp, bp->bio_error);
197 	}
198 
199 	if (dp->d_flags & D_FLAG_USED) {
200 		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
201 		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
202 	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
203 		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
204 		sc->sc_nused++;
205 		dp->d_flags |= D_FLAG_USED;
206 	}
207 	dp->d_atime = time_uptime;
208 }
209 
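/*
 * Completion handler for a cache-fill read: hand the data to every bio
 * waiting on the entry, then drop the entry if it was invalidated by a
 * write or if the read failed.
 */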
210 static void
211 g_cache_done(struct bio *bp)
212 {
213 	struct g_cache_softc *sc;
214 	struct g_cache_desc *dp;
215 	struct bio *bp2, *tmpbp;
216 
217 	sc = bp->bio_from->geom->softc;
218 	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
219 	dp = G_CACHE_DESC2(bp);
220 	mtx_lock(&sc->sc_mtx);
221 	bp2 = dp->d_biolist;
222 	while (bp2 != NULL) {
223 		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
224 		tmpbp = G_CACHE_NEXT_BIO2(bp2);
225 		g_cache_deliver(sc, bp2, dp, bp->bio_error);
226 		bp2 = tmpbp;
227 	}
228 	dp->d_biolist = NULL;
229 	if (dp->d_flags & D_FLAG_INVALID) {
230 		sc->sc_invalid--;
231 		g_cache_free(sc, dp);
232 	} else if (bp->bio_error) {
233 		LIST_REMOVE(dp, d_next);
234 		if (dp->d_flags & D_FLAG_USED) {
235 			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
236 			sc->sc_nused--;
237 		}
238 		g_cache_free(sc, dp);
239 	}
240 	mtx_unlock(&sc->sc_mtx);
241 	g_destroy_bio(bp);
242 }
243 
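/* Find the descriptor for block 'bno', if cached.  Called with sc_mtx held. */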
244 static struct g_cache_desc *
245 g_cache_lookup(struct g_cache_softc *sc, off_t bno)
246 {
247 	struct g_cache_desc *dp;
248 
249 	mtx_assert(&sc->sc_mtx, MA_OWNED);
250 
251 	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
252 		if (dp->d_bno == bno)
253 			return (dp);
254 	return (NULL);
255 }
256 
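/*
 * Serve a read from the cache.  On a hit the data is delivered immediately,
 * or the bio is queued on the entry if a fill is still in flight.  On a miss
 * a new entry is allocated and a full-block read is sent down to the
 * consumer with g_cache_done() as its completion routine.
 */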
257 static int
258 g_cache_read(struct g_cache_softc *sc, struct bio *bp)
259 {
260 	struct bio *cbp;
261 	struct g_cache_desc *dp;
262 
263 	mtx_lock(&sc->sc_mtx);
264 	dp = g_cache_lookup(sc,
265 	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
266 	if (dp != NULL) {
267 		/* Add to waiters list or deliver. */
268 		sc->sc_cachehits++;
269 		if (dp->d_biolist != NULL) {
270 			G_CACHE_NEXT_BIO1(bp) = sc;
271 			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
272 			dp->d_biolist = bp;
273 		} else
274 			g_cache_deliver(sc, bp, dp, 0);
275 		mtx_unlock(&sc->sc_mtx);
276 		return (0);
277 	}
278 
279 	/* Cache miss.  Allocate entry and schedule bio.  */
280 	sc->sc_cachemisses++;
281 	dp = g_cache_alloc(sc);
282 	if (dp == NULL) {
283 		mtx_unlock(&sc->sc_mtx);
284 		return (ENOMEM);
285 	}
286 	cbp = g_clone_bio(bp);
287 	if (cbp == NULL) {
288 		g_cache_free(sc, dp);
289 		mtx_unlock(&sc->sc_mtx);
290 		return (ENOMEM);
291 	}
292 
293 	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
294 	G_CACHE_NEXT_BIO1(bp) = sc;
295 	G_CACHE_NEXT_BIO2(bp) = NULL;
296 	dp->d_biolist = bp;
297 	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
298 	    dp, d_next);
299 	mtx_unlock(&sc->sc_mtx);
300 
301 	G_CACHE_DESC1(cbp) = sc;
302 	G_CACHE_DESC2(cbp) = dp;
303 	cbp->bio_done = g_cache_done;
304 	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
305 	cbp->bio_data = dp->d_data;
306 	cbp->bio_length = sc->sc_bsize;
307 	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
308 	return (0);
309 }
310 
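/*
 * Invalidate every cached block overlapping the write request.  Entries with
 * a fill still in flight are only marked invalid and are freed later in
 * g_cache_done().
 */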
311 static void
312 g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
313 {
314 	struct g_cache_desc *dp;
315 	off_t bno, lim;
316 
317 	mtx_lock(&sc->sc_mtx);
318 	bno = OFF2BNO(bp->bio_offset, sc);
319 	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
320 	do {
321 		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
322 			LIST_REMOVE(dp, d_next);
323 			if (dp->d_flags & D_FLAG_USED) {
324 				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
325 				sc->sc_nused--;
326 			}
327 			if (dp->d_biolist == NULL)
328 				g_cache_free(sc, dp);
329 			else {
330 				dp->d_flags = D_FLAG_INVALID;
331 				sc->sc_invalid++;
332 			}
333 		}
334 		bno++;
335 	} while (bno <= lim);
336 	mtx_unlock(&sc->sc_mtx);
337 }
338 
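/*
 * I/O start routine.  Small reads that lie below sc_tail and span at most
 * two cache blocks are served through the cache; writes invalidate any
 * overlapping entries.  Requests not fully handled here are cloned and
 * passed down to the consumer unchanged.
 */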
339 static void
340 g_cache_start(struct bio *bp)
341 {
342 	struct g_cache_softc *sc;
343 	struct g_geom *gp;
344 	struct g_cache_desc *dp;
345 	struct bio *cbp;
346 
347 	gp = bp->bio_to->geom;
348 	sc = gp->softc;
349 	G_CACHE_LOGREQ(bp, "Request received.");
350 	switch (bp->bio_cmd) {
351 	case BIO_READ:
352 		sc->sc_reads++;
353 		sc->sc_readbytes += bp->bio_length;
354 		if (!g_cache_enable)
355 			break;
356 		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
357 			break;
358 		if (OFF2BNO(bp->bio_offset, sc) ==
359 		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
360 			sc->sc_cachereads++;
361 			sc->sc_cachereadbytes += bp->bio_length;
362 			if (g_cache_read(sc, bp) == 0)
363 				return;
364 			sc->sc_cachereads--;
365 			sc->sc_cachereadbytes -= bp->bio_length;
366 			break;
367 		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
368 		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
369 			mtx_lock(&sc->sc_mtx);
370 			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
371 			if (dp == NULL || dp->d_biolist != NULL) {
372 				mtx_unlock(&sc->sc_mtx);
373 				break;
374 			}
375 			sc->sc_cachereads++;
376 			sc->sc_cachereadbytes += bp->bio_length;
377 			g_cache_deliver(sc, bp, dp, 0);
378 			mtx_unlock(&sc->sc_mtx);
379 			if (g_cache_read(sc, bp) == 0)
380 				return;
381 			sc->sc_cachereads--;
382 			sc->sc_cachereadbytes -= bp->bio_length;
383 			break;
384 		}
385 		break;
386 	case BIO_WRITE:
387 		sc->sc_writes++;
388 		sc->sc_wrotebytes += bp->bio_length;
389 		g_cache_invalidate(sc, bp);
390 		break;
391 	}
392 	cbp = g_clone_bio(bp);
393 	if (cbp == NULL) {
394 		g_io_deliver(bp, ENOMEM);
395 		return;
396 	}
397 	cbp->bio_done = g_std_done;
398 	G_CACHE_LOGREQ(cbp, "Sending request.");
399 	g_io_request(cbp, LIST_FIRST(&gp->consumer));
400 }
401 
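/*
 * Periodic callout: entries that have been idle for at least
 * g_cache_idletime seconds are marked reusable, and the used list is
 * trimmed once it grows past the used_hi watermark.  Reschedules itself
 * every g_cache_timeout seconds.
 */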
402 static void
403 g_cache_go(void *arg)
404 {
405 	struct g_cache_softc *sc = arg;
406 	struct g_cache_desc *dp;
407 	int i;
408 
409 	mtx_assert(&sc->sc_mtx, MA_OWNED);
410 
411 	/* Forcibly mark idle ready entries as used. */
412 	for (i = 0; i < G_CACHE_BUCKETS; i++) {
413 		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
414 			if (dp->d_flags & D_FLAG_USED ||
415 			    dp->d_biolist != NULL ||
416 			    time_uptime - dp->d_atime < g_cache_idletime)
417 				continue;
418 			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
419 			sc->sc_nused++;
420 			dp->d_flags |= D_FLAG_USED;
421 		}
422 	}
423 
424 	/* Keep the number of used entries low. */
425 	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
426 		g_cache_free_used(sc);
427 
428 	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
429 }
430 
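/* Pass access requests straight through to the underlying consumer. */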
431 static int
432 g_cache_access(struct g_provider *pp, int dr, int dw, int de)
433 {
434 	struct g_geom *gp;
435 	struct g_consumer *cp;
436 	int error;
437 
438 	gp = pp->geom;
439 	cp = LIST_FIRST(&gp->consumer);
440 	error = g_access(cp, dr, dw, de);
441 
442 	return (error);
443 }
444 
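/* The underlying provider has gone away; forcibly destroy the cache geom. */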
445 static void
446 g_cache_orphan(struct g_consumer *cp)
447 {
448 
449 	g_topology_assert();
450 	g_cache_destroy(cp->geom->softc, 1);
451 }
452 
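/* Return the softc of the cache geom named 'name', or NULL if none exists. */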
453 static struct g_cache_softc *
454 g_cache_find_device(struct g_class *mp, const char *name)
455 {
456 	struct g_geom *gp;
457 
458 	LIST_FOREACH(gp, &mp->geom, geom) {
459 		if (strcmp(gp->name, name) == 0)
460 			return (gp->softc);
461 	}
462 	return (NULL);
463 }
464 
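/*
 * Create a cache geom on top of provider 'pp'.  The cache size (in entries)
 * and the block size come from the metadata.  For automatically configured
 * devices the last sector, which holds the metadata, is hidden from the new
 * provider.
 */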
465 static struct g_geom *
466 g_cache_create(struct g_class *mp, struct g_provider *pp,
467     const struct g_cache_metadata *md, u_int type)
468 {
469 	struct g_cache_softc *sc;
470 	struct g_geom *gp;
471 	struct g_provider *newpp;
472 	struct g_consumer *cp;
473 	u_int bshift;
474 	int i;
475 
476 	g_topology_assert();
477 
478 	gp = NULL;
479 	newpp = NULL;
480 	cp = NULL;
481 
482 	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);
483 
484 	/* The cache must hold at least 100 entries. */
485 	if (md->md_size < 100) {
486 		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
487 		return (NULL);
488 	}
489 
490 	/* Block size restrictions. */
491 	bshift = ffs(md->md_bsize) - 1;
492 	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
493 	    md->md_bsize != 1 << bshift ||
494 	    (md->md_bsize % pp->sectorsize) != 0) {
495 		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
496 		return (NULL);
497 	}
498 
499 	/* Check for a duplicate device name. */
500 	if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
501 		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
502 		return (NULL);
503 	}
504 
505 	gp = g_new_geomf(mp, "%s", md->md_name);
506 	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
507 	sc->sc_type = type;
508 	sc->sc_bshift = bshift;
509 	sc->sc_bsize = 1 << bshift;
510 	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
511 	    UMA_ALIGN_PTR, 0);
512 	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
513 	for (i = 0; i < G_CACHE_BUCKETS; i++)
514 		LIST_INIT(&sc->sc_desclist[i]);
515 	TAILQ_INIT(&sc->sc_usedlist);
516 	sc->sc_maxent = md->md_size;
517 	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
518 	gp->softc = sc;
519 	sc->sc_geom = gp;
520 	gp->start = g_cache_start;
521 	gp->orphan = g_cache_orphan;
522 	gp->access = g_cache_access;
523 	gp->dumpconf = g_cache_dumpconf;
524 
525 	newpp = g_new_providerf(gp, "cache/%s", gp->name);
526 	newpp->sectorsize = pp->sectorsize;
527 	newpp->mediasize = pp->mediasize;
528 	if (type == G_CACHE_TYPE_AUTOMATIC)
529 		newpp->mediasize -= pp->sectorsize;
530 	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);
531 
532 	cp = g_new_consumer(gp);
533 	if (g_attach(cp, pp) != 0) {
534 		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
535 		g_destroy_consumer(cp);
536 		g_destroy_provider(newpp);
537 		mtx_destroy(&sc->sc_mtx);
538 		g_free(sc);
539 		g_destroy_geom(gp);
540 		return (NULL);
541 	}
542 
543 	g_error_provider(newpp, 0);
544 	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
545 	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
546 	return (gp);
547 }
548 
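/*
 * Tear down a cache geom: stop the callout, free all cached entries and the
 * softc, and wither the geom.  An open device is only destroyed when 'force'
 * is set.
 */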
549 static int
550 g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
551 {
552 	struct g_geom *gp;
553 	struct g_provider *pp;
554 	struct g_cache_desc *dp, *dp2;
555 	int i;
556 
557 	g_topology_assert();
558 	if (sc == NULL)
559 		return (ENXIO);
560 	gp = sc->sc_geom;
561 	pp = LIST_FIRST(&gp->provider);
562 	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
563 		if (force) {
564 			G_CACHE_DEBUG(0, "Device %s is still open, so it "
565 			    "can't be definitely removed.", pp->name);
566 		} else {
567 			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
568 			    pp->name, pp->acr, pp->acw, pp->ace);
569 			return (EBUSY);
570 		}
571 	} else {
572 		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
573 	}
574 	callout_drain(&sc->sc_callout);
575 	mtx_lock(&sc->sc_mtx);
576 	for (i = 0; i < G_CACHE_BUCKETS; i++) {
577 		dp = LIST_FIRST(&sc->sc_desclist[i]);
578 		while (dp != NULL) {
579 			dp2 = LIST_NEXT(dp, d_next);
580 			g_cache_free(sc, dp);
581 			dp = dp2;
582 		}
583 	}
584 	mtx_unlock(&sc->sc_mtx);
585 	mtx_destroy(&sc->sc_mtx);
586 	uma_zdestroy(sc->sc_zone);
587 	g_free(sc);
588 	gp->softc = NULL;
589 	g_wither_geom(gp, ENXIO);
590 
591 	return (0);
592 }
593 
594 static int
595 g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
596 {
597 
598 	return (g_cache_destroy(gp->softc, 0));
599 }
600 
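/* Read and decode the metadata stored in the provider's last sector. */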
601 static int
602 g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
603 {
604 	struct g_provider *pp;
605 	u_char *buf;
606 	int error;
607 
608 	g_topology_assert();
609 
610 	error = g_access(cp, 1, 0, 0);
611 	if (error != 0)
612 		return (error);
613 	pp = cp->provider;
614 	g_topology_unlock();
615 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
616 	    &error);
617 	g_topology_lock();
618 	g_access(cp, -1, 0, 0);
619 	if (buf == NULL)
620 		return (error);
621 
622 	/* Decode metadata. */
623 	cache_metadata_decode(buf, md);
624 	g_free(buf);
625 
626 	return (0);
627 }
628 
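/* Encode and write the metadata into the provider's last sector. */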
629 static int
630 g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
631 {
632 	struct g_provider *pp;
633 	u_char *buf;
634 	int error;
635 
636 	g_topology_assert();
637 
638 	error = g_access(cp, 0, 1, 0);
639 	if (error != 0)
640 		return (error);
641 	pp = cp->provider;
642 	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
643 	cache_metadata_encode(md, buf);
644 	g_topology_unlock();
645 	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
646 	g_topology_lock();
647 	g_access(cp, 0, -1, 0);
648 	free(buf, M_GCACHE);
649 
650 	return (error);
651 }
652 
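/*
 * Taste routine: read the metadata from the provider's last sector and, if
 * it is valid for this provider, auto-configure a cache device on top of it.
 */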
653 static struct g_geom *
654 g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
655 {
656 	struct g_cache_metadata md;
657 	struct g_consumer *cp;
658 	struct g_geom *gp;
659 	int error;
660 
661 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
662 	g_topology_assert();
663 
664 	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);
665 
666 	gp = g_new_geomf(mp, "cache:taste");
667 	gp->start = g_cache_start;
668 	gp->orphan = g_cache_orphan;
669 	gp->access = g_cache_access;
670 	cp = g_new_consumer(gp);
671 	g_attach(cp, pp);
672 	error = g_cache_read_metadata(cp, &md);
673 	g_detach(cp);
674 	g_destroy_consumer(cp);
675 	g_destroy_geom(gp);
676 	if (error != 0)
677 		return (NULL);
678 
679 	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
680 		return (NULL);
681 	if (md.md_version > G_CACHE_VERSION) {
682 		printf("geom_cache.ko module is too old to handle %s.\n",
683 		    pp->name);
684 		return (NULL);
685 	}
686 	if (md.md_provsize != pp->mediasize)
687 		return (NULL);
688 
689 	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
690 	if (gp == NULL) {
691 		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
692 		return (NULL);
693 	}
694 	return (gp);
695 }
696 
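/*
 * Handle the "create" verb: build metadata from the request arguments and
 * create a manual (non-persistent) cache device on top of the given
 * provider.
 */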
697 static void
698 g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
699 {
700 	struct g_cache_metadata md;
701 	struct g_provider *pp;
702 	struct g_geom *gp;
703 	intmax_t *bsize, *size;
704 	const char *name;
705 	int *nargs;
706 
707 	g_topology_assert();
708 
709 	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
710 	if (nargs == NULL) {
711 		gctl_error(req, "No '%s' argument", "nargs");
712 		return;
713 	}
714 	if (*nargs != 2) {
715 		gctl_error(req, "Invalid number of arguments.");
716 		return;
717 	}
718 
719 	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
720 	md.md_version = G_CACHE_VERSION;
721 	name = gctl_get_asciiparam(req, "arg0");
722 	if (name == NULL) {
723 		gctl_error(req, "No 'arg0' argument");
724 		return;
725 	}
726 	strlcpy(md.md_name, name, sizeof(md.md_name));
727 
728 	size = gctl_get_paraml(req, "size", sizeof(*size));
729 	if (size == NULL) {
730 		gctl_error(req, "No '%s' argument", "size");
731 		return;
732 	}
733 	if ((u_int)*size < 100) {
734 		gctl_error(req, "Invalid '%s' argument", "size");
735 		return;
736 	}
737 	md.md_size = (u_int)*size;
738 
739 	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
740 	if (bsize == NULL) {
741 		gctl_error(req, "No '%s' argument", "blocksize");
742 		return;
743 	}
744 	if (*bsize < 0) {
745 		gctl_error(req, "Invalid '%s' argument", "blocksize");
746 		return;
747 	}
748 	md.md_bsize = (u_int)*bsize;
749 
750 	/* Metadata is not written to disk for manually created devices. */
751 	md.md_provsize = 0;
752 
753 	name = gctl_get_asciiparam(req, "arg1");
754 	if (name == NULL) {
755 		gctl_error(req, "No 'arg1' argument");
756 		return;
757 	}
758 	if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
759 		name += strlen("/dev/");
760 	pp = g_provider_by_name(name);
761 	if (pp == NULL) {
762 		G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
763 		gctl_error(req, "Provider %s is invalid.", name);
764 		return;
765 	}
766 	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
767 	if (gp == NULL) {
768 		gctl_error(req, "Can't create %s.", md.md_name);
769 		return;
770 	}
771 }
772 
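/*
 * Handle the "configure" verb: update the cache size of a running device
 * and, for automatically configured devices, rewrite the on-disk metadata
 * with the new size and block size.
 */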
773 static void
774 g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
775 {
776 	struct g_cache_metadata md;
777 	struct g_cache_softc *sc;
778 	struct g_consumer *cp;
779 	intmax_t *bsize, *size;
780 	const char *name;
781 	int error, *nargs;
782 
783 	g_topology_assert();
784 
785 	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
786 	if (nargs == NULL) {
787 		gctl_error(req, "No '%s' argument", "nargs");
788 		return;
789 	}
790 	if (*nargs != 1) {
791 		gctl_error(req, "Missing device.");
792 		return;
793 	}
794 
795 	name = gctl_get_asciiparam(req, "arg0");
796 	if (name == NULL) {
797 		gctl_error(req, "No 'arg0' argument");
798 		return;
799 	}
800 	sc = g_cache_find_device(mp, name);
801 	if (sc == NULL) {
802 		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
803 		gctl_error(req, "Device %s is invalid.", name);
804 		return;
805 	}
806 
807 	size = gctl_get_paraml(req, "size", sizeof(*size));
808 	if (size == NULL) {
809 		gctl_error(req, "No '%s' argument", "size");
810 		return;
811 	}
812 	if ((u_int)*size != 0 && (u_int)*size < 100) {
813 		gctl_error(req, "Invalid '%s' argument", "size");
814 		return;
815 	}
816 	if ((u_int)*size != 0)
817 		sc->sc_maxent = (u_int)*size;
818 
819 	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
820 	if (bsize == NULL) {
821 		gctl_error(req, "No '%s' argument", "blocksize");
822 		return;
823 	}
824 	if (*bsize < 0) {
825 		gctl_error(req, "Invalid '%s' argument", "blocksize");
826 		return;
827 	}
828 
829 	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
830 		return;
831 
832 	strlcpy(md.md_name, name, sizeof(md.md_name));
833 	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
834 	md.md_version = G_CACHE_VERSION;
835 	if ((u_int)*size != 0)
836 		md.md_size = (u_int)*size;
837 	else
838 		md.md_size = sc->sc_maxent;
839 	if ((u_int)*bsize != 0)
840 		md.md_bsize = (u_int)*bsize;
841 	else
842 		md.md_bsize = sc->sc_bsize;
843 	cp = LIST_FIRST(&sc->sc_geom->consumer);
844 	md.md_provsize = cp->provider->mediasize;
845 	error = g_cache_write_metadata(cp, &md);
846 	if (error == 0)
847 		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
848 	else
849 		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
850 		    cp->provider->name, error);
851 }
852 
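/* Handle the "destroy" and "stop" verbs for one or more devices. */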
853 static void
854 g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
855 {
856 	int *nargs, *force, error, i;
857 	struct g_cache_softc *sc;
858 	const char *name;
859 	char param[16];
860 
861 	g_topology_assert();
862 
863 	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
864 	if (nargs == NULL) {
865 		gctl_error(req, "No '%s' argument", "nargs");
866 		return;
867 	}
868 	if (*nargs <= 0) {
869 		gctl_error(req, "Missing device(s).");
870 		return;
871 	}
872 	force = gctl_get_paraml(req, "force", sizeof(*force));
873 	if (force == NULL) {
874 		gctl_error(req, "No 'force' argument");
875 		return;
876 	}
877 
878 	for (i = 0; i < *nargs; i++) {
879 		snprintf(param, sizeof(param), "arg%d", i);
880 		name = gctl_get_asciiparam(req, param);
881 		if (name == NULL) {
882 			gctl_error(req, "No 'arg%d' argument", i);
883 			return;
884 		}
885 		sc = g_cache_find_device(mp, name);
886 		if (sc == NULL) {
887 			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
888 			gctl_error(req, "Device %s is invalid.", name);
889 			return;
890 		}
891 		error = g_cache_destroy(sc, *force);
892 		if (error != 0) {
893 			gctl_error(req, "Cannot destroy device %s (error=%d).",
894 			    sc->sc_name, error);
895 			return;
896 		}
897 	}
898 }
899 
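/* Handle the "reset" verb: clear the statistics of the named devices. */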
900 static void
901 g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
902 {
903 	struct g_cache_softc *sc;
904 	const char *name;
905 	char param[16];
906 	int i, *nargs;
907 
908 	g_topology_assert();
909 
910 	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
911 	if (nargs == NULL) {
912 		gctl_error(req, "No '%s' argument", "nargs");
913 		return;
914 	}
915 	if (*nargs <= 0) {
916 		gctl_error(req, "Missing device(s).");
917 		return;
918 	}
919 
920 	for (i = 0; i < *nargs; i++) {
921 		snprintf(param, sizeof(param), "arg%d", i);
922 		name = gctl_get_asciiparam(req, param);
923 		if (name == NULL) {
924 			gctl_error(req, "No 'arg%d' argument", i);
925 			return;
926 		}
927 		sc = g_cache_find_device(mp, name);
928 		if (sc == NULL) {
929 			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
930 			gctl_error(req, "Device %s is invalid.", name);
931 			return;
932 		}
933 		sc->sc_reads = 0;
934 		sc->sc_readbytes = 0;
935 		sc->sc_cachereads = 0;
936 		sc->sc_cachereadbytes = 0;
937 		sc->sc_cachehits = 0;
938 		sc->sc_cachemisses = 0;
939 		sc->sc_cachefull = 0;
940 		sc->sc_writes = 0;
941 		sc->sc_wrotebytes = 0;
942 	}
943 }
944 
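/*
 * Control request dispatcher: verify that the userland utility and the
 * kernel module agree on the metadata version, then hand the request to the
 * appropriate verb handler.
 */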
945 static void
946 g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
947 {
948 	uint32_t *version;
949 
950 	g_topology_assert();
951 
952 	version = gctl_get_paraml(req, "version", sizeof(*version));
953 	if (version == NULL) {
954 		gctl_error(req, "No '%s' argument.", "version");
955 		return;
956 	}
957 	if (*version != G_CACHE_VERSION) {
958 		gctl_error(req, "Userland and kernel parts are out of sync.");
959 		return;
960 	}
961 
962 	if (strcmp(verb, "create") == 0) {
963 		g_cache_ctl_create(req, mp);
964 		return;
965 	} else if (strcmp(verb, "configure") == 0) {
966 		g_cache_ctl_configure(req, mp);
967 		return;
968 	} else if (strcmp(verb, "destroy") == 0 ||
969 	    strcmp(verb, "stop") == 0) {
970 		g_cache_ctl_destroy(req, mp);
971 		return;
972 	} else if (strcmp(verb, "reset") == 0) {
973 		g_cache_ctl_reset(req, mp);
974 		return;
975 	}
976 
977 	gctl_error(req, "Unknown verb.");
978 }
979 
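/* Dump per-device configuration and statistics into the GEOM XML config. */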
980 static void
981 g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
982     struct g_consumer *cp, struct g_provider *pp)
983 {
984 	struct g_cache_softc *sc;
985 
986 	if (pp != NULL || cp != NULL)
987 		return;
988 	sc = gp->softc;
989 	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
990 	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
991 	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
992 	    (uintmax_t)sc->sc_tail);
993 	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
994 	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
995 	    sc->sc_nused);
996 	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
997 	    sc->sc_invalid);
998 	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
999 	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
1000 	    sc->sc_readbytes);
1001 	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
1002 	    sc->sc_cachereads);
1003 	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
1004 	    sc->sc_cachereadbytes);
1005 	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
1006 	    sc->sc_cachehits);
1007 	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
1008 	    sc->sc_cachemisses);
1009 	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
1010 	    sc->sc_cachefull);
1011 	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
1012 	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
1013 	    sc->sc_wrotebytes);
1014 }
1015 
1016 DECLARE_GEOM_CLASS(g_cache_class, g_cache);
1017