1 /* 2 * Register cache access API - rbtree caching support 3 * 4 * Copyright 2011 Wolfson Microelectronics plc 5 * 6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/debugfs.h> 14 #include <linux/device.h> 15 #include <linux/rbtree.h> 16 #include <linux/seq_file.h> 17 #include <linux/slab.h> 18 19 #include "internal.h" 20 21 static int regcache_rbtree_write(struct regmap *map, unsigned int reg, 22 unsigned int value); 23 static int regcache_rbtree_exit(struct regmap *map); 24 25 struct regcache_rbtree_node { 26 /* block of adjacent registers */ 27 void *block; 28 /* Which registers are present */ 29 long *cache_present; 30 /* base register handled by this block */ 31 unsigned int base_reg; 32 /* number of registers available in the block */ 33 unsigned int blklen; 34 /* the actual rbtree node holding this block */ 35 struct rb_node node; 36 }; 37 38 struct regcache_rbtree_ctx { 39 struct rb_root root; 40 struct regcache_rbtree_node *cached_rbnode; 41 }; 42 43 static inline void regcache_rbtree_get_base_top_reg( 44 struct regmap *map, 45 struct regcache_rbtree_node *rbnode, 46 unsigned int *base, unsigned int *top) 47 { 48 *base = rbnode->base_reg; 49 *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride); 50 } 51 52 static unsigned int regcache_rbtree_get_register(struct regmap *map, 53 struct regcache_rbtree_node *rbnode, unsigned int idx) 54 { 55 return regcache_get_val(map, rbnode->block, idx); 56 } 57 58 static void regcache_rbtree_set_register(struct regmap *map, 59 struct regcache_rbtree_node *rbnode, 60 unsigned int idx, unsigned int val) 61 { 62 set_bit(idx, rbnode->cache_present); 63 regcache_set_val(map, rbnode->block, idx, val); 64 } 65 66 static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map, 67 unsigned int reg) 68 { 69 struct regcache_rbtree_ctx *rbtree_ctx = map->cache; 70 struct rb_node *node; 71 struct regcache_rbtree_node *rbnode; 72 unsigned int base_reg, top_reg; 73 74 rbnode = rbtree_ctx->cached_rbnode; 75 if (rbnode) { 76 regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, 77 &top_reg); 78 if (reg >= base_reg && reg <= top_reg) 79 return rbnode; 80 } 81 82 node = rbtree_ctx->root.rb_node; 83 while (node) { 84 rbnode = rb_entry(node, struct regcache_rbtree_node, node); 85 regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, 86 &top_reg); 87 if (reg >= base_reg && reg <= top_reg) { 88 rbtree_ctx->cached_rbnode = rbnode; 89 return rbnode; 90 } else if (reg > top_reg) { 91 node = node->rb_right; 92 } else if (reg < base_reg) { 93 node = node->rb_left; 94 } 95 } 96 97 return NULL; 98 } 99 100 static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root, 101 struct regcache_rbtree_node *rbnode) 102 { 103 struct rb_node **new, *parent; 104 struct regcache_rbtree_node *rbnode_tmp; 105 unsigned int base_reg_tmp, top_reg_tmp; 106 unsigned int base_reg; 107 108 parent = NULL; 109 new = &root->rb_node; 110 while (*new) { 111 rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node); 112 /* base and top registers of the current rbnode */ 113 regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp, 114 &top_reg_tmp); 115 /* base register of the rbnode to be added */ 116 base_reg = rbnode->base_reg; 117 parent = *new; 118 /* if this register has already been inserted, just return */ 119 if (base_reg >= base_reg_tmp && 120 base_reg <= top_reg_tmp) 121 return 0; 122 else if (base_reg > top_reg_tmp) 123 new = &((*new)->rb_right); 124 else if (base_reg < base_reg_tmp) 125 new = &((*new)->rb_left); 126 } 127 128 /* insert the node into the rbtree */ 129 rb_link_node(&rbnode->node, parent, new); 130 rb_insert_color(&rbnode->node, root); 131 132 return 1; 133 } 134 135 #ifdef CONFIG_DEBUG_FS 136 static int rbtree_show(struct seq_file *s, void *ignored) 137 { 138 struct regmap *map = s->private; 139 struct regcache_rbtree_ctx *rbtree_ctx = map->cache; 140 struct regcache_rbtree_node *n; 141 struct rb_node *node; 142 unsigned int base, top; 143 size_t mem_size; 144 int nodes = 0; 145 int registers = 0; 146 int this_registers, average; 147 148 map->lock(map->lock_arg); 149 150 mem_size = sizeof(*rbtree_ctx); 151 152 for (node = rb_first(&rbtree_ctx->root); node != NULL; 153 node = rb_next(node)) { 154 n = rb_entry(node, struct regcache_rbtree_node, node); 155 mem_size += sizeof(*n); 156 mem_size += (n->blklen * map->cache_word_size); 157 mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long); 158 159 regcache_rbtree_get_base_top_reg(map, n, &base, &top); 160 this_registers = ((top - base) / map->reg_stride) + 1; 161 seq_printf(s, "%x-%x (%d)\n", base, top, this_registers); 162 163 nodes++; 164 registers += this_registers; 165 } 166 167 if (nodes) 168 average = registers / nodes; 169 else 170 average = 0; 171 172 seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n", 173 nodes, registers, average, mem_size); 174 175 map->unlock(map->lock_arg); 176 177 return 0; 178 } 179 180 DEFINE_SHOW_ATTRIBUTE(rbtree); 181 182 static void rbtree_debugfs_init(struct regmap *map) 183 { 184 debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); 185 } 186 #endif 187 188 static int regcache_rbtree_init(struct regmap *map) 189 { 190 struct regcache_rbtree_ctx *rbtree_ctx; 191 int i; 192 int ret; 193 194 map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); 195 if (!map->cache) 196 return -ENOMEM; 197 198 rbtree_ctx = map->cache; 199 rbtree_ctx->root = RB_ROOT; 200 rbtree_ctx->cached_rbnode = NULL; 201 202 for (i = 0; i < map->num_reg_defaults; i++) { 203 ret = regcache_rbtree_write(map, 204 map->reg_defaults[i].reg, 205 map->reg_defaults[i].def); 206 if (ret) 207 goto err; 208 } 209 210 return 0; 211 212 err: 213 regcache_rbtree_exit(map); 214 return ret; 215 } 216 217 static int regcache_rbtree_exit(struct regmap *map) 218 { 219 struct rb_node *next; 220 struct regcache_rbtree_ctx *rbtree_ctx; 221 struct regcache_rbtree_node *rbtree_node; 222 223 /* if we've already been called then just return */ 224 rbtree_ctx = map->cache; 225 if (!rbtree_ctx) 226 return 0; 227 228 /* free up the rbtree */ 229 next = rb_first(&rbtree_ctx->root); 230 while (next) { 231 rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); 232 next = rb_next(&rbtree_node->node); 233 rb_erase(&rbtree_node->node, &rbtree_ctx->root); 234 kfree(rbtree_node->cache_present); 235 kfree(rbtree_node->block); 236 kfree(rbtree_node); 237 } 238 239 /* release the resources */ 240 kfree(map->cache); 241 map->cache = NULL; 242 243 return 0; 244 } 245 246 static int regcache_rbtree_read(struct regmap *map, 247 unsigned int reg, unsigned int *value) 248 { 249 struct regcache_rbtree_node *rbnode; 250 unsigned int reg_tmp; 251 252 rbnode = regcache_rbtree_lookup(map, reg); 253 if (rbnode) { 254 reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; 255 if (!test_bit(reg_tmp, rbnode->cache_present)) 256 return -ENOENT; 257 *value = regcache_rbtree_get_register(map, rbnode, reg_tmp); 258 } else { 259 return -ENOENT; 260 } 261 262 return 0; 263 } 264 265 266 static int regcache_rbtree_insert_to_block(struct regmap *map, 267 struct regcache_rbtree_node *rbnode, 268 unsigned int base_reg, 269 unsigned int top_reg, 270 unsigned int reg, 271 unsigned int value) 272 { 273 unsigned int blklen; 274 unsigned int pos, offset; 275 unsigned long *present; 276 u8 *blk; 277 278 blklen = (top_reg - base_reg) / map->reg_stride + 1; 279 pos = (reg - base_reg) / map->reg_stride; 280 offset = (rbnode->base_reg - base_reg) / map->reg_stride; 281 282 blk = krealloc(rbnode->block, 283 blklen * map->cache_word_size, 284 GFP_KERNEL); 285 if (!blk) 286 return -ENOMEM; 287 288 if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { 289 present = krealloc(rbnode->cache_present, 290 BITS_TO_LONGS(blklen) * sizeof(*present), 291 GFP_KERNEL); 292 if (!present) { 293 kfree(blk); 294 return -ENOMEM; 295 } 296 297 memset(present + BITS_TO_LONGS(rbnode->blklen), 0, 298 (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) 299 * sizeof(*present)); 300 } else { 301 present = rbnode->cache_present; 302 } 303 304 /* insert the register value in the correct place in the rbnode block */ 305 if (pos == 0) { 306 memmove(blk + offset * map->cache_word_size, 307 blk, rbnode->blklen * map->cache_word_size); 308 bitmap_shift_left(present, present, offset, blklen); 309 } 310 311 /* update the rbnode block, its size and the base register */ 312 rbnode->block = blk; 313 rbnode->blklen = blklen; 314 rbnode->base_reg = base_reg; 315 rbnode->cache_present = present; 316 317 regcache_rbtree_set_register(map, rbnode, pos, value); 318 return 0; 319 } 320 321 static struct regcache_rbtree_node * 322 regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) 323 { 324 struct regcache_rbtree_node *rbnode; 325 const struct regmap_range *range; 326 int i; 327 328 rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL); 329 if (!rbnode) 330 return NULL; 331 332 /* If there is a read table then use it to guess at an allocation */ 333 if (map->rd_table) { 334 for (i = 0; i < map->rd_table->n_yes_ranges; i++) { 335 if (regmap_reg_in_range(reg, 336 &map->rd_table->yes_ranges[i])) 337 break; 338 } 339 340 if (i != map->rd_table->n_yes_ranges) { 341 range = &map->rd_table->yes_ranges[i]; 342 rbnode->blklen = (range->range_max - range->range_min) / 343 map->reg_stride + 1; 344 rbnode->base_reg = range->range_min; 345 } 346 } 347 348 if (!rbnode->blklen) { 349 rbnode->blklen = 1; 350 rbnode->base_reg = reg; 351 } 352 353 rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size, 354 GFP_KERNEL); 355 if (!rbnode->block) 356 goto err_free; 357 358 rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen), 359 sizeof(*rbnode->cache_present), 360 GFP_KERNEL); 361 if (!rbnode->cache_present) 362 goto err_free_block; 363 364 return rbnode; 365 366 err_free_block: 367 kfree(rbnode->block); 368 err_free: 369 kfree(rbnode); 370 return NULL; 371 } 372 373 static int regcache_rbtree_write(struct regmap *map, unsigned int reg, 374 unsigned int value) 375 { 376 struct regcache_rbtree_ctx *rbtree_ctx; 377 struct regcache_rbtree_node *rbnode, *rbnode_tmp; 378 struct rb_node *node; 379 unsigned int reg_tmp; 380 int ret; 381 382 rbtree_ctx = map->cache; 383 384 /* if we can't locate it in the cached rbnode we'll have 385 * to traverse the rbtree looking for it. 386 */ 387 rbnode = regcache_rbtree_lookup(map, reg); 388 if (rbnode) { 389 reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; 390 regcache_rbtree_set_register(map, rbnode, reg_tmp, value); 391 } else { 392 unsigned int base_reg, top_reg; 393 unsigned int new_base_reg, new_top_reg; 394 unsigned int min, max; 395 unsigned int max_dist; 396 unsigned int dist, best_dist = UINT_MAX; 397 398 max_dist = map->reg_stride * sizeof(*rbnode_tmp) / 399 map->cache_word_size; 400 if (reg < max_dist) 401 min = 0; 402 else 403 min = reg - max_dist; 404 max = reg + max_dist; 405 406 /* look for an adjacent register to the one we are about to add */ 407 node = rbtree_ctx->root.rb_node; 408 while (node) { 409 rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, 410 node); 411 412 regcache_rbtree_get_base_top_reg(map, rbnode_tmp, 413 &base_reg, &top_reg); 414 415 if (base_reg <= max && top_reg >= min) { 416 if (reg < base_reg) 417 dist = base_reg - reg; 418 else if (reg > top_reg) 419 dist = reg - top_reg; 420 else 421 dist = 0; 422 if (dist < best_dist) { 423 rbnode = rbnode_tmp; 424 best_dist = dist; 425 new_base_reg = min(reg, base_reg); 426 new_top_reg = max(reg, top_reg); 427 } 428 } 429 430 /* 431 * Keep looking, we want to choose the closest block, 432 * otherwise we might end up creating overlapping 433 * blocks, which breaks the rbtree. 434 */ 435 if (reg < base_reg) 436 node = node->rb_left; 437 else if (reg > top_reg) 438 node = node->rb_right; 439 else 440 break; 441 } 442 443 if (rbnode) { 444 ret = regcache_rbtree_insert_to_block(map, rbnode, 445 new_base_reg, 446 new_top_reg, reg, 447 value); 448 if (ret) 449 return ret; 450 rbtree_ctx->cached_rbnode = rbnode; 451 return 0; 452 } 453 454 /* We did not manage to find a place to insert it in 455 * an existing block so create a new rbnode. 456 */ 457 rbnode = regcache_rbtree_node_alloc(map, reg); 458 if (!rbnode) 459 return -ENOMEM; 460 regcache_rbtree_set_register(map, rbnode, 461 reg - rbnode->base_reg, value); 462 regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); 463 rbtree_ctx->cached_rbnode = rbnode; 464 } 465 466 return 0; 467 } 468 469 static int regcache_rbtree_sync(struct regmap *map, unsigned int min, 470 unsigned int max) 471 { 472 struct regcache_rbtree_ctx *rbtree_ctx; 473 struct rb_node *node; 474 struct regcache_rbtree_node *rbnode; 475 unsigned int base_reg, top_reg; 476 unsigned int start, end; 477 int ret; 478 479 rbtree_ctx = map->cache; 480 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { 481 rbnode = rb_entry(node, struct regcache_rbtree_node, node); 482 483 regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, 484 &top_reg); 485 if (base_reg > max) 486 break; 487 if (top_reg < min) 488 continue; 489 490 if (min > base_reg) 491 start = (min - base_reg) / map->reg_stride; 492 else 493 start = 0; 494 495 if (max < top_reg) 496 end = (max - base_reg) / map->reg_stride + 1; 497 else 498 end = rbnode->blklen; 499 500 ret = regcache_sync_block(map, rbnode->block, 501 rbnode->cache_present, 502 rbnode->base_reg, start, end); 503 if (ret != 0) 504 return ret; 505 } 506 507 return regmap_async_complete(map); 508 } 509 510 static int regcache_rbtree_drop(struct regmap *map, unsigned int min, 511 unsigned int max) 512 { 513 struct regcache_rbtree_ctx *rbtree_ctx; 514 struct regcache_rbtree_node *rbnode; 515 struct rb_node *node; 516 unsigned int base_reg, top_reg; 517 unsigned int start, end; 518 519 rbtree_ctx = map->cache; 520 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { 521 rbnode = rb_entry(node, struct regcache_rbtree_node, node); 522 523 regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, 524 &top_reg); 525 if (base_reg > max) 526 break; 527 if (top_reg < min) 528 continue; 529 530 if (min > base_reg) 531 start = (min - base_reg) / map->reg_stride; 532 else 533 start = 0; 534 535 if (max < top_reg) 536 end = (max - base_reg) / map->reg_stride + 1; 537 else 538 end = rbnode->blklen; 539 540 bitmap_clear(rbnode->cache_present, start, end - start); 541 } 542 543 return 0; 544 } 545 546 struct regcache_ops regcache_rbtree_ops = { 547 .type = REGCACHE_RBTREE, 548 .name = "rbtree", 549 .init = regcache_rbtree_init, 550 .exit = regcache_rbtree_exit, 551 #ifdef CONFIG_DEBUG_FS 552 .debugfs_init = rbtree_debugfs_init, 553 #endif 554 .read = regcache_rbtree_read, 555 .write = regcache_rbtree_write, 556 .sync = regcache_rbtree_sync, 557 .drop = regcache_rbtree_drop, 558 }; 559