xref: /freebsd/sys/geom/cache/g_cache.c (revision d6eb98610fa65663bf0df4574b7cb2c5c4ffda71)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/cache/g_cache.h>

FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "Enable caching of read requests");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "Interval between cache maintenance runs (in seconds)");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "Idle time before an unused entry may be reclaimed (in seconds)");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;
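
/*
 * Sysctl handler for the used_lo/used_hi watermarks: accept only values in
 * the 0..100 (percent) range and refuse to let used_lo exceed used_hi.
 */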
static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
	u_int val = *(u_int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 100)
		return (EINVAL);
	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
		return (EINVAL);
	*(u_int *)arg1 = val;
	return (0);
}
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
	&g_cache_used_lo, 0, sysctl_handle_pct, "IU",
	"Low watermark of reclaimable entries (percent of cache size)");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
	&g_cache_used_hi, 0, sysctl_handle_pct, "IU",
	"High watermark of reclaimable entries (percent of cache size)");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};

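/* Translate between byte offsets and cache block numbers. */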
#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)

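/*
 * Allocate a cache entry descriptor: reuse the oldest reclaimable ("used")
 * entry if one exists, otherwise allocate a fresh descriptor and data buffer
 * as long as the cache holds fewer than sc_maxent entries.  Returns NULL on
 * failure.  Called with sc_mtx held.
 */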
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		dp->d_flags = 0;
		LIST_REMOVE(dp, d_next);
		return (dp);
	}
	if (sc->sc_nent > sc->sc_maxent) {
		sc->sc_cachefull++;
		return (NULL);
	}
	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
	if (dp == NULL)
		return (NULL);
	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
	if (dp->d_data == NULL) {
		free(dp, M_GCACHE);
		return (NULL);
	}
	sc->sc_nent++;
	return (dp);
}

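/* Release a descriptor and its data buffer.  Called with sc_mtx held. */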
static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}

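/* Trim the list of reclaimable entries down to the used_lo watermark. */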
static void
g_cache_free_used(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;
	u_int n;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	n = g_cache_used_lo * sc->sc_maxent / 100;
	while (sc->sc_nused > n) {
		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		LIST_REMOVE(dp, d_next);
		g_cache_free(sc, dp);
	}
}

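/*
 * Copy the part of the cached block dp that satisfies bp into the bio's
 * buffer and complete the bio once all of its data has been gathered.
 * Entries that the request has consumed to their end become reclaimable.
 */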
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}

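/*
 * Completion handler for the cloned read that fills a cache block: hand the
 * data to every bio waiting on the entry, then drop the entry if it was
 * invalidated by a write or if the read failed.
 */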
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}

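/* Look up the descriptor for block bno in its hash bucket. */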
static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
		if (dp->d_bno == bno)
			return (dp);
	return (NULL);
}

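/*
 * Serve a read from the cache.  On a hit, deliver immediately or join the
 * entry's list of waiters if its read is still in flight.  On a miss,
 * allocate an entry and clone bp into a full block-sized read of the
 * underlying provider, completed by g_cache_done().
 */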
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss.  Allocate entry and schedule bio.  */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}

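/*
 * Invalidate all cached blocks overlapping a write.  Entries with a read
 * still in flight are only marked D_FLAG_INVALID and freed in g_cache_done().
 */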
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}

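/*
 * Main I/O entry point.  Reads below sc_tail that fit in one cache block
 * (or whose first of two blocks is already cached) are served through the
 * cache; writes invalidate any overlapping blocks; everything else is
 * passed straight down to the lower provider.
 */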
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		g_cache_invalidate(sc, bp);
		break;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}

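/*
 * Periodic housekeeping, rescheduled every g_cache_timeout seconds: entries
 * idle for more than g_cache_idletime seconds become reclaimable, and the
 * reclaimable list is trimmed once it grows past the used_hi watermark.
 */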
static void
g_cache_go(void *arg)
{
	struct g_cache_softc *sc = arg;
	struct g_cache_desc *dp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/* Forcibly mark idle ready entries as used. */
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
			if (dp->d_flags & D_FLAG_USED ||
			    dp->d_biolist != NULL ||
			    time_uptime - dp->d_atime < g_cache_idletime)
				continue;
			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused++;
			dp->d_flags |= D_FLAG_USED;
		}
	}

	/* Keep the number of used entries low. */
	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
		g_cache_free_used(sc);

	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	error = g_access(cp, dr, dw, de);

	return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		if (strcmp(gp->name, name) == 0)
			return (gp->softc);
	}
	return (NULL);
}

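/*
 * Create the cache geom and its cache/<name> provider on top of pp after
 * validating the requested entry count and block size, then start the
 * housekeeping callout.
 */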
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_provider *newpp;
	struct g_consumer *cp;
	u_int bshift;
	int i;

	g_topology_assert();

	gp = NULL;
	newpp = NULL;
	cp = NULL;

	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

	/* Cache size is minimum 100. */
	if (md->md_size < 100) {
		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
		return (NULL);
	}

	/* Block size restrictions. */
	bshift = ffs(md->md_bsize) - 1;
	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
	    md->md_bsize != 1 << bshift ||
	    (md->md_bsize % pp->sectorsize) != 0) {
		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
		return (NULL);
	}

	/* Check for duplicate unit. */
	if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
		return (NULL);
	}

	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	sc->sc_type = type;
	sc->sc_bshift = bshift;
	sc->sc_bsize = 1 << bshift;
	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
	for (i = 0; i < G_CACHE_BUCKETS; i++)
		LIST_INIT(&sc->sc_desclist[i]);
	TAILQ_INIT(&sc->sc_usedlist);
	sc->sc_maxent = md->md_size;
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	gp->softc = sc;
	sc->sc_geom = gp;
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	gp->dumpconf = g_cache_dumpconf;

	newpp = g_new_providerf(gp, "cache/%s", gp->name);
	newpp->sectorsize = pp->sectorsize;
	newpp->mediasize = pp->mediasize;
	if (type == G_CACHE_TYPE_AUTOMATIC)
		newpp->mediasize -= pp->sectorsize;
	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

	cp = g_new_consumer(gp);
	if (g_attach(cp, pp) != 0) {
		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
		g_destroy_consumer(cp);
		g_destroy_provider(newpp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		g_destroy_geom(gp);
		return (NULL);
	}

	g_error_provider(newpp, 0);
	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
	return (gp);
}

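/* Tear down a cache device; refuse if it is still open unless forced. */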
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}

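/* Read and decode the metadata stored in the provider's last sector. */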
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

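/* Encode and write the metadata to the provider's last sector. */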
static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}

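/*
 * Taste routine for automatic configuration: create the cache device if the
 * provider's last sector holds valid gcache metadata.
 */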
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_cache_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}

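/*
 * Handle the gcache(8) "create" verb: set up a manual (non-persistent)
 * cache device on top of the named provider.
 */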
static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	name = gctl_get_asciiparam(req, "arg1");
	if (name == NULL) {
		gctl_error(req, "No 'arg1' argument");
		return;
	}
	if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
		name += strlen("/dev/");
	pp = g_provider_by_name(name);
	if (pp == NULL) {
		G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
		gctl_error(req, "Provider %s is invalid.", name);
		return;
	}
	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}

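/*
 * Handle the gcache(8) "configure" verb: adjust the entry count of a running
 * device and, for automatically configured devices, rewrite the on-disk
 * metadata with the new parameters.
 */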
static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0 && (u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;
	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}

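/* Handle the gcache(8) "destroy"/"stop" verbs. */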
static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	int *nargs, *force, error, i;
	struct g_cache_softc *sc;
	const char *name;
	char param[16];

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No 'force' argument");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		error = g_cache_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

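/* Handle the gcache(8) "reset" verb: clear the per-device statistics. */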
static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_softc *sc;
	const char *name;
	char param[16];
	int i, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		sc->sc_reads = 0;
		sc->sc_readbytes = 0;
		sc->sc_cachereads = 0;
		sc->sc_cachereadbytes = 0;
		sc->sc_cachehits = 0;
		sc->sc_cachemisses = 0;
		sc->sc_cachefull = 0;
		sc->sc_writes = 0;
		sc->sc_wrotebytes = 0;
	}
}

static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_CACHE_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "create") == 0) {
		g_cache_ctl_create(req, mp);
		return;
	} else if (strcmp(verb, "configure") == 0) {
		g_cache_ctl_configure(req, mp);
		return;
	} else if (strcmp(verb, "destroy") == 0 ||
	    strcmp(verb, "stop") == 0) {
		g_cache_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "reset") == 0) {
		g_cache_ctl_reset(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

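/* Export per-device configuration and statistics via the GEOM XML dump. */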
static void
g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_cache_softc *sc;

	if (pp != NULL || cp != NULL)
		return;
	sc = gp->softc;
	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
	    (uintmax_t)sc->sc_tail);
	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
	    sc->sc_nused);
	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
	    sc->sc_invalid);
	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
	    sc->sc_readbytes);
	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
	    sc->sc_cachereads);
	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
	    sc->sc_cachereadbytes);
	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
	    sc->sc_cachehits);
	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
	    sc->sc_cachemisses);
	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
	    sc->sc_cachefull);
	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
	    sc->sc_wrotebytes);
}

DECLARE_GEOM_CLASS(g_cache_class, g_cache);
MODULE_VERSION(geom_cache, 0);