Lines matching "d-cache-block-size" in bcache.c (simple hashed block cache)

/*-
 * ...
 * Simple hashed block cache
 */
...
/*
 * bcache per device node. cache is allocated on device first open and freed
 * on last close, to save memory. The issue there is the size; biosdisk ...
 */
...
static u_int bcache_units;	/* number of devices with cache */
...
#define	BHASH(bc, blkno)	((blkno) & ((bc)->bcache_nblks - 1))
#define	BCACHE_LOOKUP(bc, blkno)	\
	((bc)->bcache_ctl[BHASH((bc), (blkno))].bc_blkno != (blkno))

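Because bcache_nblks is kept a power of two (see bcache_allocate() below), BHASH reduces to a single AND, and every disk block has exactly one candidate slot: a direct-mapped cache. BCACHE_LOOKUP then just asks whether the slot's tag disagrees with the wanted block. A standalone sketch of the same addressing, with a made-up slot count and block numbers:

    #include <stdio.h>
    #include <stdint.h>

    #define NBLKS	512			/* must be a power of two */
    #define HASH(blkno)	((blkno) & (NBLKS - 1))

    int
    main(void)
    {
        /* 188, 700 and 1212 differ only above bit 8, so all collide on slot 188 */
        uint64_t blks[] = { 188, 700, 1212 };

        for (int i = 0; i < 3; i++)
            printf("blk %ju -> slot %ju\n",
                (uintmax_t)blks[i], (uintmax_t)HASH(blks[i]));
        return (0);
    }
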
/*
 * Initialise the cache for (nblks) of (bsize).
 */
...
/*
 * add number of devices to bcache. we have to divide cache space
 * ...
 */
...
In bcache_allocate():

    /*
     * the bcache block count must be a power of 2 for the hash function
     */
    i = fls(disks) - 1;		/* highbit - 1 */
    ...
    bc->bcache_nblks = bcache_total_nblks >> i;
    bcache_unit_nblks = bc->bcache_nblks;
    bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize);
    if (bc->bcache_data == NULL) {
        ...
        bc->bcache_nblks = 32;
        bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize + ...
    }

    bc->bcache_ctl = malloc(bc->bcache_nblks * sizeof(struct bcachectl));

    if ((bc->bcache_data == NULL) || (bc->bcache_ctl == NULL)) {
        ...
    }

    /* Flush the cache */
    for (i = 0; i < bc->bcache_nblks; i++) {
        bc->bcache_ctl[i].bc_count = -1;
        bc->bcache_ctl[i].bc_blkno = -1;
    }
    ...
    bc->ra = BCACHE_READAHEAD;	/* optimistic read ahead */
    bc->bcache_nextblkno = -1;

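The shift by fls(disks) - 1 splits one global block pool evenly across device units while keeping each unit's slot count a power of two, which BHASH depends on. A minimal standalone sketch of that arithmetic (the helper name and the round-up step are mine, not lifted from the file):

    #include <strings.h>	/* fls() is BSD; the loader gets it from libsa */

    /* hypothetical helper: per-unit slot count for `total` cache blocks */
    static unsigned
    unit_nblks(unsigned total, int disks)
    {
        int i = fls(disks) - 1;		/* highbit - 1 */

        if (disks > (1 << i))		/* not a power of two: use the next one */
            i++;
        return (total >> i);
    }

For example, unit_nblks(2048, 4) is 512; three disks round up to four units, so unit_nblks(2048, 3) is 512 as well and no two units ever overlap.
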
bcache_free(void *cache)
{
    struct bcache *bc = cache;
    ...
    bcache_units--;
    ...
}

/*
 * ...
 * cache with the new values.
 */
static int
write_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
    ...
    struct bcache *bc = dd->dv_cache;
    ...
    nblk = size / bcache_blksize;
    ...
    return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
}

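The write path is write-through: the request always goes down to the underlying dv_strategy callback. Whether the elided middle refreshes the cached copies or simply drops them is not visible in these matches; a minimal sketch assuming it invalidates the written range (bcache_invalidate() does appear later in the file):

    /* sketch: drop the nblk blocks being overwritten, then write through */
    for (i = 0; i < nblk; i++)
        bcache_invalidate(bc, blk + i);
    return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
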
/*
 * ...
 * be satisfied by the cache, use the supplied strategy routine to do
 * device I/O and then use the I/O results to populate the cache.
 */
static int
read_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
    ...
    struct bcache *bc = dd->dv_cache;
    ...
        return (-1);
    ...
    nblk = size / bcache_blksize;
    if (nblk == 0 && size != 0)
        ...

    /* Satisfy any cache hits up front, break on first miss */
    ...
            bcache_misses += (nblk - i);

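The scan around that counter is elided by the listing; based on BCACHE_LOOKUP above, it plausibly walks the request block by block and stops at the first slot tagged with a different block, counting everything before the miss as hits and everything after as misses:

    /* sketch of the elided hit scan: stop at the first direct-mapped miss */
    complete = 1;
    for (i = 0; i < nblk; i++) {
        if (BCACHE_LOOKUP(bc, blk + i)) {	/* slot holds another block */
            bcache_misses += (nblk - i);
            complete = 0;
            break;
        } else
            bcache_hits++;
    }
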
    /*
     * Adjust read-ahead size if appropriate. Subject to the requirement
     * that bc->ra must stay in between MINREADAHEAD and READAHEAD, we
     * ...
     */
    if (complete || (i == bc->ralen && bc->ralen > 0)) {
        if (bc->ra < BCACHE_READAHEAD)
            bc->ra <<= 1;	/* increase read ahead */
    } else {
        if (nblk - i > BCACHE_MINREADAHEAD && bc->ralen > 0 &&
            bc->ra > BCACHE_MINREADAHEAD)
            bc->ra >>= 1;	/* reduce read ahead */
    }

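This is multiplicative increase on fully cached requests and halving on large misses, clamped to the two bounds. A self-contained restatement (the bound values are invented; the real BCACHE_MINREADAHEAD/BCACHE_READAHEAD values are not shown in these matches):

    #define MINRA	32	/* hypothetical lower bound */
    #define MAXRA	512	/* hypothetical upper bound */

    /* grow read-ahead on a full cache hit, shrink it on a big miss */
    static unsigned
    adjust_ra(unsigned ra, int complete, unsigned missed)
    {
        if (complete) {
            if (ra < MAXRA)
                ra <<= 1;	/* increase read ahead */
        } else if (missed > MINRA && ra > MINRA) {
            ra >>= 1;		/* reduce read ahead */
        }
        return (ra);
    }
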
    if (blk == bc->bcache_nextblkno) {
        if (nblk > bc->ralen)
            bc->ralen = 0;
        else
            bc->ralen -= nblk;
    }

    if (complete) {	/* whole set was in cache, return it */
        bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
        ...
    }

    /*
     * ...
     * block, read in all remaining blocks + readahead.
     * We have space at least for nblk - i before bcache wraps.
     */
    ...
    p_buf = bc->bcache_data + (bcache_blksize * BHASH(bc, p_blk));
    r_size = bc->bcache_nblks - BHASH(bc, p_blk);	/* remaining blocks */
    ...
    p_size = MIN(r_size, nblk - i);	/* read at least those blocks */

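The r_size cap means a single pass never crosses the wrap point of the cache, so the device I/O below always lands in one contiguous run of bcache_data. Worked with invented numbers (512 slots, p_blk hashing to slot 500):

    r_size = 512 - 500;			/* 12 slots before the cache wraps */
    p_size = MIN(r_size, nblk - i);	/* so this pass reads at most 12 blocks */
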
    /*
     * The read ahead size setup.
     * ...
     * 1. We do not want to read ahead by wrapping around the
     * bcache end - this would complicate the cache management.
     * 2. We are using bc->ra as dynamic hint for read ahead size,
     * detected cache hits will increase the read-ahead block count, and
     * misses will decrease it.
     * 3. ... the underlying device
     * may have a larger sector size, and we should perform the IO by
     * ...
     * passing the sector size to bcache_allocate(), or by using ioctl(), but
     * ... round the
     * read ahead block count down to a multiple of 16.
     * ... assuming the
     * BIOS disk interface is providing the correct value for sector size.
     */

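Rounding the read-ahead count down to a multiple of 16 blocks keeps the read-ahead tail aligned for any sector size up to 16 * 512 B = 8 KiB, covering the common 4 KiB case, without bcache ever asking the device for its sector size. The rounding itself is a single mask (my restatement, not a line from the file):

    ra &= ~15u;		/* round read-ahead down to a multiple of 16 blocks */
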
    ra = bc->bcache_nblks - BHASH(bc, p_blk + p_size);
    /*
     * Only trigger read-ahead if we detect two blocks being read
     * sequentially.
     */
    if ((bc->bcache_nextblkno != blk) && ra != 0) {
        ra = 0;
    }

    if (ra != 0 && ra != bc->bcache_nblks) {	/* do we have RA space? */
        ra = MIN(bc->ra, ra - 1);
        ...
            ra = BCACHE_MAXIOWRA - p_size;
        bc->ralen = ra;
        ...
    } else {
        bc->ralen = 0;
    }

    /*
     * ...
     * with read-ahead, it may happen we are attempting to read past
     * disk end, as bcache has no information about disk size.
     * ...
     */
    result = dd->dv_strategy(dd->dv_devdata, rw, p_blk,
        p_size * bcache_blksize, p_buf, &r_size);
    ...
    bcache_rablks += (p_size - r_size);
    ...
    if (size > i * bcache_blksize)
        size = i * bcache_blksize;

    if (size != 0) {
        bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
        ...
    }
    ...
    *rsize = size;
    bc->bcache_nextblkno = blk + (size / DEV_BSIZE);
    ...
}

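bcache_nextblkno records, in 512-byte DEV_BSIZE units, where this request ended; the blk == bc->bcache_nextblkno tests above compare it with the next request's start to detect a sequential stream before spending cache space on read-ahead. An illustrative call sequence (hypothetical values; the signature is as in the fragments):

    /* two back-to-back 8 KiB reads: 8192 / 512 = 16 blocks each */
    read_strategy(dd, F_READ, 100, 8192, buf, &rsize);	/* sets nextblkno = 116 */
    read_strategy(dd, F_READ, 116, 8192, buf, &rsize);	/* sequential: RA allowed */
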
/*
 * Requests larger than 1/2 cache size will be bypassed and go
 * directly to the disk.
 */
int
bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size,
    char *buf, size_t *rsize)
{
    ...
    struct bcache *bc = dd->dv_cache;
    ...
    if (bc != NULL)
        bcache_nblks = bc->bcache_nblks;

    /* bypass large requests, or when the cache is inactive */
    if (bc == NULL ||
        ((size * 2 / bcache_blksize) > bcache_nblks)) {
        DPRINTF("bypass %zu from %jd", size / bcache_blksize, blk);
        ...
        return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
    }

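The bypass condition is "request larger than half the unit's cache", rearranged so it is computed in whole cache blocks: size * 2 / bcache_blksize > bcache_nblks is the same test as size > (bcache_nblks * bcache_blksize) / 2. Worked with invented numbers (512 slots of 512 B, i.e. a 256 KiB unit cache):

    /* a 200 KiB request: 204800 * 2 / 512 = 800 > 512, so bypass to dv_strategy */
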
    ...
    nblk = size / bcache_blksize;
    if (size != 0 && nblk == 0)
        nblk++;		/* read at least one block */

    while (size) {
        cblk = bcache_nblks - BHASH(bc, blk);	/* # of blocks left */
        ...
        if (size <= bcache_blksize)
            csize = size;
        ...
        size -= isize;
        nblk = size / bcache_blksize;
    }

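Each pass of this loop covers at most the run of slots before BHASH wraps back to 0, mirroring the r_size computation in read_strategy(), so each read_strategy() call sees an unwrapped, contiguous cache region. Worked with invented numbers:

    cblk = 512 - 508;	/* blk hashes to slot 508 of 512: 4 blocks until the wrap */
    /* pass 1 handles those 4 blocks; pass 2 continues at slot 0 */
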
    ...
    return (write_strategy(devdata, F_WRITE, blk, size, buf, rsize));
    ...
    return (-1);

In bcache_free_instance():

    free(bc->bcache_ctl);
    free(bc->bcache_data);

/*
 * Insert a block into the cache.
 */
In bcache_insert():

    DPRINTF("insert blk %jd -> %u # %d", blkno, cand, bcache_bcount);
    bc->bcache_ctl[cand].bc_blkno = blkno;
    bc->bcache_ctl[cand].bc_count = bcache_bcount++;

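cand can only be the block's hash slot, so insertion is an unconditional overwrite of whatever was cached there (direct-mapped eviction), with bcache_bcount stamping slots in insertion order for the debug output. Based on the fragments, the whole function plausibly reads:

    static void
    bcache_insert(struct bcache *bc, daddr_t blkno)
    {
        u_int cand = BHASH(bc, blkno);	/* the one candidate slot */

        DPRINTF("insert blk %jd -> %u # %d", blkno, cand, bcache_bcount);
        bc->bcache_ctl[cand].bc_blkno = blkno;
        bc->bcache_ctl[cand].bc_count = bcache_bcount++;
    }
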
/*
 * Invalidate a block from the cache.
 */
In bcache_invalidate():

    if (bc->bcache_ctl[i].bc_blkno == blkno) {
        bc->bcache_ctl[i].bc_count = -1;
        bc->bcache_ctl[i].bc_blkno = -1;
    }

COMMAND_SET(bcachestat, "bcachestat", "get disk block cache stats", command_bcache);

In command_bcache():

    printf("cache blocksz: %u\n", bcache_blksize);
    printf("cache readahead: %u\n", bcache_rablks);
    printf("unit cache blocks: %u\n", bcache_unit_nblks);
    ...
    printf("%u ops %d bypasses %u hits %u misses\n", bcache_ops,
        ...
