xref: /freebsd/sys/geom/cache/g_cache.c (revision fdafd315ad0d0f28a11b9fb4476a9ab059c62b92)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/cache/g_cache.h>

FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;
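/*
 * Sysctl handler shared by kern.geom.cache.used_lo and used_hi: accept only
 * percentage values (0..100) and keep the low watermark at or below the
 * high one.
 */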
static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
	u_int val = *(u_int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 100)
		return (EINVAL);
	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
		return (EINVAL);
	*(u_int *)arg1 = val;
	return (0);
}
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &g_cache_used_lo, 0,
    sysctl_handle_pct, "IU",
    "");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &g_cache_used_hi, 0,
    sysctl_handle_pct, "IU",
    "");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};

#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)

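/*
 * Allocate a cache block descriptor.  The oldest entry on the used list is
 * recycled when one is available; otherwise a fresh descriptor and data
 * buffer are allocated, unless the cache is already at its entry limit.
 */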
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		dp->d_flags = 0;
		LIST_REMOVE(dp, d_next);
		return (dp);
	}
	if (sc->sc_nent > sc->sc_maxent) {
		sc->sc_cachefull++;
		return (NULL);
	}
	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
	if (dp == NULL)
		return (NULL);
	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
	if (dp->d_data == NULL) {
		free(dp, M_GCACHE);
		return (NULL);
	}
	sc->sc_nent++;
	return (dp);
}

static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}

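/*
 * Trim the used list down to the low watermark (used_lo percent of the
 * configured cache size), freeing the oldest entries first.
 */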
static void
g_cache_free_used(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;
	u_int n;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	n = g_cache_used_lo * sc->sc_maxent / 100;
	while (sc->sc_nused > n) {
		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		LIST_REMOVE(dp, d_next);
		g_cache_free(sc, dp);
	}
}

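/*
 * Copy the part of a cached block that overlaps the given read bio into the
 * caller's buffer, completing the bio once all of its data is accounted for,
 * and update the descriptor's position on the used list and its access time.
 */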
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}

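/*
 * Completion handler for block-sized reads issued to fill the cache: hand
 * the freshly read data to every bio waiting on the descriptor, then drop
 * the descriptor if it was invalidated in the meantime or the read failed.
 */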
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}

static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
		if (dp->d_bno == bno)
			return (dp);
	return (NULL);
}

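/*
 * Satisfy a read from the cache.  On a hit the data is delivered at once or
 * the bio is queued on the descriptor while the block is still being read;
 * on a miss a new descriptor is allocated and a block-sized read is sent to
 * the underlying provider.
 */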
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss.  Allocate entry and schedule bio.  */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}

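/*
 * Drop every cached block touched by a write.  Blocks with a read still in
 * flight cannot be freed here, so they are only marked invalid and released
 * later in g_cache_done().
 */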
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}

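/*
 * I/O request entry point.  Reads that fit within one or two cache blocks
 * are served through the cache, writes invalidate any affected blocks, and
 * everything else is passed straight down to the underlying provider.
 */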
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		g_cache_invalidate(sc, bp);
		break;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}

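/*
 * Periodic callout: entries that have been idle for longer than
 * kern.geom.cache.idletime are pushed onto the used list, and the used list
 * is trimmed once it grows past the high watermark.
 */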
static void
g_cache_go(void *arg)
{
	struct g_cache_softc *sc = arg;
	struct g_cache_desc *dp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/* Forcibly mark idle ready entries as used. */
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
			if (dp->d_flags & D_FLAG_USED ||
			    dp->d_biolist != NULL ||
			    time_uptime - dp->d_atime < g_cache_idletime)
				continue;
			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused++;
			dp->d_flags |= D_FLAG_USED;
		}
	}

	/* Keep the number of used entries low. */
	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
		g_cache_free_used(sc);

	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	error = g_access(cp, dr, dw, de);

	return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		if (strcmp(gp->name, name) == 0)
			return (gp->softc);
	}
	return (NULL);
}

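/*
 * Create a cache geom on top of the given provider: validate the requested
 * cache size and block size, set up the softc, UMA zone and callout, and
 * expose the cache/<name> provider.
 */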
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_provider *newpp;
	struct g_consumer *cp;
	u_int bshift;
	int i;

	g_topology_assert();

	gp = NULL;
	newpp = NULL;
	cp = NULL;

	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

	/* Cache size is minimum 100. */
	if (md->md_size < 100) {
		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
		return (NULL);
	}

	/* Block size restrictions. */
	bshift = ffs(md->md_bsize) - 1;
	if (md->md_bsize == 0 || md->md_bsize > maxphys ||
	    md->md_bsize != 1 << bshift ||
	    (md->md_bsize % pp->sectorsize) != 0) {
		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
		return (NULL);
	}

	/* Check for duplicate unit. */
	if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
		return (NULL);
	}

	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	sc->sc_type = type;
	sc->sc_bshift = bshift;
	sc->sc_bsize = 1 << bshift;
	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
	for (i = 0; i < G_CACHE_BUCKETS; i++)
		LIST_INIT(&sc->sc_desclist[i]);
	TAILQ_INIT(&sc->sc_usedlist);
	sc->sc_maxent = md->md_size;
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	gp->softc = sc;
	sc->sc_geom = gp;
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	gp->dumpconf = g_cache_dumpconf;

	newpp = g_new_providerf(gp, "cache/%s", gp->name);
	newpp->sectorsize = pp->sectorsize;
	newpp->mediasize = pp->mediasize;
	if (type == G_CACHE_TYPE_AUTOMATIC)
		newpp->mediasize -= pp->sectorsize;
	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

	cp = g_new_consumer(gp);
	if (g_attach(cp, pp) != 0) {
		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
		g_destroy_consumer(cp);
		g_destroy_provider(newpp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		g_destroy_geom(gp);
		return (NULL);
	}

	g_error_provider(newpp, 0);
	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
	return (gp);
}

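/*
 * Tear down a cache geom: stop the callout, free all cached blocks and the
 * softc, and wither the geom.  Refused with EBUSY if the provider is still
 * open and destruction was not forced.
 */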
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}

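/*
 * Read and decode the on-disk metadata stored in the provider's last sector.
 */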
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}

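/*
 * Taste routine for automatic configuration: probe the provider's last
 * sector for valid gcache metadata and, if found, create the cache geom.
 */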
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0) {
		error = g_cache_read_metadata(cp, &md);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}

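/*
 * "gcache create" control request: build the metadata from the supplied
 * arguments and create a manually configured (non-persistent) cache geom.
 */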
static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	pp = gctl_get_provider(req, "arg1");
	if (pp == NULL)
		return;
	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}

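/*
 * "gcache configure" control request: adjust the in-memory cache size and,
 * for automatically configured devices, rewrite the on-disk metadata with
 * the new size and block size.
 */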
static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0 && (u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;
	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}

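/*
 * "gcache destroy"/"gcache stop" control request: destroy each of the named
 * cache devices, optionally forcing destruction of devices that are open.
 */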
static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	int *nargs, *force, error, i;
	struct g_cache_softc *sc;
	const char *name;
	char param[16];

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No 'force' argument");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		error = g_cache_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_softc *sc;
	const char *name;
	char param[16];
	int i, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		sc->sc_reads = 0;
		sc->sc_readbytes = 0;
		sc->sc_cachereads = 0;
		sc->sc_cachereadbytes = 0;
		sc->sc_cachehits = 0;
		sc->sc_cachemisses = 0;
		sc->sc_cachefull = 0;
		sc->sc_writes = 0;
		sc->sc_wrotebytes = 0;
	}
}

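/*
 * Control request dispatcher: verify that userland and kernel agree on the
 * metadata version, then hand the request to the matching verb handler.
 */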
static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_CACHE_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "create") == 0) {
		g_cache_ctl_create(req, mp);
		return;
	} else if (strcmp(verb, "configure") == 0) {
		g_cache_ctl_configure(req, mp);
		return;
	} else if (strcmp(verb, "destroy") == 0 ||
	    strcmp(verb, "stop") == 0) {
		g_cache_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "reset") == 0) {
		g_cache_ctl_reset(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

static void
g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_cache_softc *sc;

	if (pp != NULL || cp != NULL)
		return;
	sc = gp->softc;
	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
	    (uintmax_t)sc->sc_tail);
	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
	    sc->sc_nused);
	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
	    sc->sc_invalid);
	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
	    sc->sc_readbytes);
	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
	    sc->sc_cachereads);
	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
	    sc->sc_cachereadbytes);
	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
	    sc->sc_cachehits);
	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
	    sc->sc_cachemisses);
	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
	    sc->sc_cachefull);
	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
	    sc->sc_wrotebytes);
}

DECLARE_GEOM_CLASS(g_cache_class, g_cache);
MODULE_VERSION(geom_cache, 0);