xref: /freebsd/stand/common/bcache.c (revision a0409676120c1e558d0ade943019934e0f15118d)
/*-
 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * Copyright 2015 Toomas Soome <tsoome@me.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
__FBSDID("$FreeBSD$");

/*
 * Simple hashed block cache
 */

#include <sys/stdint.h>

#include <stand.h>
#include <string.h>
#include <strings.h>

#include "bootstrap.h"

/* #define BCACHE_DEBUG */

#ifdef BCACHE_DEBUG
# define DPRINTF(fmt, args...)	printf("%s: " fmt "\n" , __func__ , ## args)
#else
# define DPRINTF(fmt, args...)	((void)0)
#endif

struct bcachectl
{
    daddr_t	bc_blkno;
    int		bc_count;
};

/*
 * Per-device-node bcache. The cache is allocated on the device's first open
 * and freed on its last close, to save memory. Sizing matters because
 * biosdisk supports up to 31 (0x1f) devices. A classic setup boots from a
 * single disk, but that changed with zfs.
 */
struct bcache {
    struct bcachectl	*bcache_ctl;
    caddr_t		bcache_data;
    size_t		bcache_nblks;
    size_t		ra;
};
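
/*
 * Illustrative sizing (the numbers are examples, not defaults): with
 * bcache_total_nblks = 32768 (16MB of 512B blocks) shared by 4 devices,
 * bcache_allocate() gives each unit 32768 >> 2 = 8192 blocks (4MB).
 */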

static u_int bcache_total_nblks;	/* set by bcache_init */
static u_int bcache_blksize;		/* set by bcache_init */
static u_int bcache_numdev;		/* set by bcache_add_dev */
/* statistics */
static u_int bcache_units;	/* number of devices with cache */
static u_int bcache_unit_nblks;	/* nblocks per unit */
static u_int bcache_hits;
static u_int bcache_misses;
static u_int bcache_ops;
static u_int bcache_bypasses;
static u_int bcache_bcount;
static u_int bcache_rablks;

#define	BHASH(bc, blkno)	((blkno) & ((bc)->bcache_nblks - 1))
#define	BCACHE_LOOKUP(bc, blkno)	\
	((bc)->bcache_ctl[BHASH((bc), (blkno))].bc_blkno != (blkno))
#define	BCACHE_READAHEAD	256
#define	BCACHE_MINREADAHEAD	32
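
/*
 * Worked example of the direct-mapped hash: with bcache_nblks = 512,
 * BHASH reduces to (blkno & 511), so blocks 7 and 519 both map to slot 7
 * and evict each other. Note the inverted sense of BCACHE_LOOKUP: it
 * evaluates to non-zero on a cache miss.
 */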

static void	bcache_invalidate(struct bcache *bc, daddr_t blkno);
static void	bcache_insert(struct bcache *bc, daddr_t blkno);
static void	bcache_free_instance(struct bcache *bc);

/*
 * Initialise the cache for (nblks) blocks of (bsize) bytes each.
 */
void
bcache_init(size_t nblks, size_t bsize)
{
    /* set up control data */
    bcache_total_nblks = nblks;
    bcache_blksize = bsize;
}

/*
 * Add a number of devices to the bcache. Cache space has to be divided
 * between the devices, so bcache_add_dev() is used to establish the count
 * before any actual allocations happen. It is meant to be called from each
 * device's init() routine, on the assumption that devsw dv_init runs for
 * plain devices first and for zfs last.
 */
void
bcache_add_dev(int devices)
{
    bcache_numdev += devices;
}

void *
bcache_allocate(void)
{
    u_int i;
    struct bcache *bc = malloc(sizeof (struct bcache));
    int disks = bcache_numdev;

    if (disks == 0)
	disks = 1;	/* safeguard */

    if (bc == NULL) {
	errno = ENOMEM;
	return (bc);
    }

    /*
     * The bcache block count must be a power of 2 for the hash function.
     */
    i = fls(disks) - 1;		/* highbit - 1 */
    if (disks > (1 << i))	/* next power of 2 */
	i++;
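
    /*
     * Example: disks = 5 gives i = fls(5) - 1 = 2; since 5 > (1 << 2),
     * i is bumped to 3 and each unit receives bcache_total_nblks / 8 blocks.
     */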

    bc->bcache_nblks = bcache_total_nblks >> i;
    bcache_unit_nblks = bc->bcache_nblks;
    bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize);
    if (bc->bcache_data == NULL) {
	/* don't error out yet; fall back to 32 blocks and try again */
	bc->bcache_nblks = 32;
	bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize +
	    sizeof(uint32_t));
    }

    bc->bcache_ctl = malloc(bc->bcache_nblks * sizeof(struct bcachectl));

    if ((bc->bcache_data == NULL) || (bc->bcache_ctl == NULL)) {
	bcache_free_instance(bc);
	errno = ENOMEM;
	return (NULL);
    }

    /* Flush the cache */
    for (i = 0; i < bc->bcache_nblks; i++) {
	bc->bcache_ctl[i].bc_count = -1;
	bc->bcache_ctl[i].bc_blkno = -1;
    }
    bcache_units++;
    bc->ra = BCACHE_READAHEAD;	/* optimistic read ahead */
    return (bc);
}

void
bcache_free(void *cache)
{
    struct bcache *bc = cache;

    if (bc == NULL)
	return;

    bcache_free_instance(bc);
    bcache_units--;
}

/*
 * Handle a write request; invalidate any cached copies of the blocks being
 * written, then write directly through to the disk.
 */
static int
write_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
    struct bcache_devdata	*dd = (struct bcache_devdata *)devdata;
    struct bcache		*bc = dd->dv_cache;
    daddr_t			i, nblk;

    nblk = size / bcache_blksize;

    /* Invalidate the blocks being written */
    for (i = 0; i < nblk; i++) {
	bcache_invalidate(bc, blk + i);
    }

    /* Write the blocks */
    return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
}

/*
 * Handle a read request; fill in parts of the request that can
 * be satisfied by the cache, use the supplied strategy routine to do
 * device I/O and then use the I/O results to populate the cache.
 */
static int
read_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
    struct bcache_devdata	*dd = (struct bcache_devdata *)devdata;
    struct bcache		*bc = dd->dv_cache;
    size_t			i, nblk, p_size, r_size, complete, ra;
    int				result;
    daddr_t			p_blk;
    caddr_t			p_buf;

    if (bc == NULL) {
	errno = ENODEV;
	return (-1);
    }

    if (rsize != NULL)
	*rsize = 0;

    nblk = size / bcache_blksize;
    if (nblk == 0 && size != 0)
	nblk++;
    result = 0;
    complete = 1;

    /* Satisfy any cache hits up front, break on first miss */
    for (i = 0; i < nblk; i++) {
	if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i))) {
	    bcache_misses += (nblk - i);
	    complete = 0;
	    if (nblk - i > BCACHE_MINREADAHEAD && bc->ra > BCACHE_MINREADAHEAD)
		bc->ra >>= 1;	/* reduce read ahead */
	    break;
	} else {
	    bcache_hits++;
	}
    }

    if (complete) {	/* whole set was in cache, return it */
	if (bc->ra < BCACHE_READAHEAD)
	    bc->ra <<= 1;	/* increase read ahead */
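	/*
	 * A contiguous run of block numbers maps to a contiguous run of
	 * cache slots here: bcache_strategy() splits requests so they never
	 * wrap around the cache end, which is why a single bcopy suffices.
	 */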
	bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
	goto done;
    }

    /*
     * Fill in any misses. From the check above, i points at the first missing
     * block; read in all remaining blocks plus readahead.
     * We have space for at least nblk - i blocks before the bcache wraps.
     */
    p_blk = blk + i;
    p_buf = bc->bcache_data + (bcache_blksize * BHASH(bc, p_blk));
    r_size = bc->bcache_nblks - BHASH(bc, p_blk); /* remaining blocks */

    p_size = MIN(r_size, nblk - i);	/* read at least those blocks */

    /*
     * The read-ahead size setup.
     * While read-ahead can save us I/O, it can also complicate things:
     * 1. We do not want to read ahead by wrapping around the
     * bcache end - this would complicate the cache management.
     * 2. We use bc->ra as a dynamic hint for the read-ahead size; detected
     * cache hits will increase the read-ahead block count, and misses will
     * decrease it, see the code above.
     * 3. The bcache is sized in 512B blocks; however, the underlying device
     * may have a larger sector size, and we should perform the I/O taking
     * those larger sector sizes into account. We could solve this by passing
     * the sector size to bcache_allocate(), or by using ioctl(), but in this
     * version we use a constant, 16 blocks, and round the read-ahead block
     * count down to a multiple of 16.
     * There are two reasons for using the constant: we are not entirely sure
     * the BIOS disk interface provides the correct value for the sector
     * size, and this way we get the most conservative setup for the ra.
     *
     * The choice of a multiple of 16 blocks (8KB) is fairly arbitrary, but
     * we want to cover CDs (2K) and 4K disks.
     * bcache_allocate() will always fall back to a minimum of 32 blocks.
     * Our choice of 16 read-ahead blocks will always fit inside the bcache.
     */

    if ((rw & F_NORA) == F_NORA)
	ra = 0;
    else
	ra = bc->bcache_nblks - BHASH(bc, p_blk + p_size);

    if (ra != 0 && ra != bc->bcache_nblks) { /* do we have RA space? */
	ra = MIN(bc->ra, ra - 1);
	ra = rounddown(ra, 16);		/* multiple of 16 blocks */
	p_size += ra;
    }
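
    /*
     * Example of the rounding: with bc->ra == 57 and 100 free slots before
     * the cache wraps, ra = MIN(57, 99) = 57, rounded down to 48 blocks
     * (24KB of read-ahead at 512B per block).
     */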

    /* invalidate bcache */
    for (i = 0; i < p_size; i++) {
	bcache_invalidate(bc, p_blk + i);
    }

    r_size = 0;
    /*
     * With read-ahead it may happen that we attempt to read past the disk
     * end, as the bcache has no information about the disk size.
     * In such a case we get a partial read if some blocks could be read,
     * or an error if none could be.
     * In either case we should return the data in the bcache and only
     * return an error if there is no data at all.
     */
    rw &= F_MASK;
    result = dd->dv_strategy(dd->dv_devdata, rw, p_blk,
	p_size * bcache_blksize, p_buf, &r_size);

    r_size /= bcache_blksize;
    for (i = 0; i < r_size; i++)
	bcache_insert(bc, p_blk + i);

    /* update ra statistics */
    if (r_size != 0) {
	if (r_size < p_size)
	    bcache_rablks += (p_size - r_size);
	else
	    bcache_rablks += ra;
    }
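
    /*
     * For instance, a request of p_size = 64 blocks that returns only
     * r_size = 40 adds 24 to bcache_rablks; a complete read adds the
     * full ra count instead.
     */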

    /* check how much data we can copy */
    for (i = 0; i < nblk; i++) {
	if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i)))
	    break;
    }

    if (size > i * bcache_blksize)
	size = i * bcache_blksize;

    if (size != 0) {
	bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
	result = 0;
    }

 done:
    if ((result == 0) && (rsize != NULL))
	*rsize = size;
    return (result);
}

/*
 * Requests larger than 1/2 the cache size will be bypassed and go
 * directly to the disk.  XXX tune this.
 */
int
bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
    struct bcache_devdata	*dd = (struct bcache_devdata *)devdata;
    struct bcache		*bc = dd->dv_cache;
    u_int bcache_nblks = 0;
    int nblk, cblk, ret;
    size_t csize, isize, total;

    bcache_ops++;

    if (bc != NULL)
	bcache_nblks = bc->bcache_nblks;

    /* bypass large requests, or when the cache is inactive */
    if (bc == NULL ||
	((size * 2 / bcache_blksize) > bcache_nblks)) {
	DPRINTF("bypass %zu from %qu", size / bcache_blksize, blk);
	bcache_bypasses++;
	rw &= F_MASK;
	return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
    }

    switch (rw & F_MASK) {
    case F_READ:
	nblk = size / bcache_blksize;
	if (size != 0 && nblk == 0)
	    nblk++;	/* read at least one block */

	ret = 0;
	total = 0;
	while (size) {
	    cblk = bcache_nblks - BHASH(bc, blk); /* # of blocks left */
	    cblk = MIN(cblk, nblk);
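
	    /*
	     * Example of the chunking: with a 512-slot cache, a request
	     * starting at a block that hashes to slot 500 with nblk = 20
	     * yields cblk = 12, so the first pass covers slots 500..511
	     * and the loop continues with the wrapped remainder.
	     */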

	    if (size <= bcache_blksize)
		csize = size;
	    else
		csize = cblk * bcache_blksize;

	    ret = read_strategy(devdata, rw, blk, csize, buf+total, &isize);

	    /*
	     * We may get an error from read-ahead; if we have read some
	     * data, return a partial read.
	     */
	    if (ret != 0 || isize == 0) {
		if (total != 0)
		    ret = 0;
		break;
	    }
	    blk += isize / bcache_blksize;
	    total += isize;
	    size -= isize;
	    nblk = size / bcache_blksize;
	}

	if (rsize)
	    *rsize = total;

	return (ret);
    case F_WRITE:
	return (write_strategy(devdata, F_WRITE, blk, size, buf, rsize));
    }
    return (-1);
}
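
/*
 * A minimal usage sketch (illustrative; bd_realstrategy and dev are
 * hypothetical names): a disk driver wires its raw strategy routine and a
 * per-device cache into a struct bcache_devdata, then routes all I/O
 * through bcache_strategy():
 *
 *	struct bcache_devdata bcd;
 *
 *	bcd.dv_strategy = bd_realstrategy;	// raw, uncached strategy
 *	bcd.dv_devdata = dev;			// driver's own cookie
 *	bcd.dv_cache = bcache_allocate();	// on first open
 *	...
 *	rv = bcache_strategy(&bcd, F_READ, blk, size, buf, &rsize);
 *	...
 *	bcache_free(bcd.dv_cache);		// on last close
 */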

/*
 * Free allocated bcache instance
 */
static void
bcache_free_instance(struct bcache *bc)
{
    if (bc != NULL) {
	free(bc->bcache_ctl);
	free(bc->bcache_data);
	free(bc);
    }
}

/*
 * Insert a block into the cache.
 */
static void
bcache_insert(struct bcache *bc, daddr_t blkno)
{
    u_int	cand;

    cand = BHASH(bc, blkno);

    DPRINTF("insert blk %llu -> %u # %d", blkno, cand, bcache_bcount);
    bc->bcache_ctl[cand].bc_blkno = blkno;
    bc->bcache_ctl[cand].bc_count = bcache_bcount++;
}

/*
 * Invalidate a block from the cache.
 */
static void
bcache_invalidate(struct bcache *bc, daddr_t blkno)
{
    u_int	i;

    i = BHASH(bc, blkno);
    if (bc->bcache_ctl[i].bc_blkno == blkno) {
	bc->bcache_ctl[i].bc_count = -1;
	bc->bcache_ctl[i].bc_blkno = -1;
	DPRINTF("invalidate blk %llu", blkno);
    }
}

#ifndef BOOT2
COMMAND_SET(bcachestat, "bcachestat", "get disk block cache stats", command_bcache);

static int
command_bcache(int argc, char *argv[] __unused)
{
    if (argc != 1) {
	command_errmsg = "wrong number of arguments";
	return (CMD_ERROR);
    }

    printf("\ncache blocks: %u\n", bcache_total_nblks);
    printf("cache blocksz: %u\n", bcache_blksize);
    printf("cache readahead: %u\n", bcache_rablks);
    printf("unit cache blocks: %u\n", bcache_unit_nblks);
    printf("cached units: %u\n", bcache_units);
    printf("%u ops %u bypasses %u hits %u misses\n", bcache_ops,
	bcache_bypasses, bcache_hits, bcache_misses);
    return (CMD_OK);
}
#endif
487