/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* the actual rbtree node holding this block */
	struct rb_node node;
	/* base register handled by this block */
	unsigned int base_reg;
	/* block of adjacent registers */
	void *block;
	/* number of registers available in the block */
	unsigned int blklen;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + rbnode->blklen - 1;
}

static unsigned int regcache_rbtree_get_register(
	struct regcache_rbtree_node *rbnode, unsigned int idx,
	unsigned int word_size)
{
	return regcache_get_val(rbnode->block, idx, word_size);
}

static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val,
					 unsigned int word_size)
{
	regcache_set_val(rbnode->block, idx, val, word_size);
}

static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

static int regcache_rbtree_insert(struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	int nodes = 0;
	int registers = 0;
	int average;

	mutex_lock(&map->lock);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(n, &base, &top);
		seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);

		nodes++;
		registers += top - base + 1;
	}

	/* guard against an empty cache: no nodes means nothing to average */
	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
		   nodes, registers, average);

	mutex_unlock(&map->lock);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif

static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	rbtree_debugfs_init(map);

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		*value = regcache_rbtree_get_register(rbnode, reg_tmp,
						      map->cache_word_size);
	} else {
		return -ENOENT;
	}

	return 0;
}

static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
					   unsigned int pos, unsigned int reg,
					   unsigned int value, unsigned int word_size)
{
	u8 *blk;

	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * word_size,
		blk + pos * word_size,
		(rbnode->blklen - pos) * word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen++;
	if (!pos)
		rbnode->base_reg = reg;

	regcache_rbtree_set_register(rbnode, pos, value, word_size);
	return 0;
}

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int val;
	unsigned int reg_tmp;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = map->cache;
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = reg - rbnode->base_reg;
		val = regcache_rbtree_get_register(rbnode, reg_tmp,
						   map->cache_word_size);
		if (val == value)
			return 0;
		regcache_rbtree_set_register(rbnode, reg_tmp, value,
					     map->cache_word_size);
	} else {
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
			for (i = 0; i < rbnode_tmp->blklen; i++) {
				reg_tmp = rbnode_tmp->base_reg + i;
				if (abs(reg_tmp - reg) != 1)
					continue;
				/* decide where in the block to place our register */
				if (reg_tmp + 1 == reg)
					pos = i + 1;
				else
					pos = i;
				ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
								      reg, value,
								      map->cache_word_size);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
				return 0;
			}
		}
		/* we did not manage to find a place to insert it in an existing
		 * block so create a new rbnode with a single register in its block.
		 * This block will get populated further if any other adjacent
		 * registers get modified in the future.
		 */
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
					GFP_KERNEL);
		if (!rbnode->block) {
			kfree(rbnode);
			return -ENOMEM;
		}
		regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
		regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

static int regcache_rbtree_sync(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int regtmp;
	unsigned int val;
	int ret;
	int i;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		for (i = 0; i < rbnode->blklen; i++) {
			regtmp = rbnode->base_reg + i;
			val = regcache_rbtree_get_register(rbnode, i,
							   map->cache_word_size);

			/* Is this the hardware default?  If so skip. */
			ret = regcache_lookup_reg(map, regtmp);
			if (ret >= 0 && val == map->reg_defaults[ret].def)
				continue;

			map->cache_bypass = 1;
			ret = _regmap_write(map, regtmp, val);
			map->cache_bypass = 0;
			if (ret)
				return ret;
			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
				regtmp, val);
		}
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync
};