#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/clock.h>

static int sh_clk_mstp32_enable(struct clk *clk)
{
	/* Module stop bits are active low: clear the bit to supply the clock. */
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
		     clk->enable_reg);
	return 0;
}

static void sh_clk_mstp32_disable(struct clk *clk)
{
	/* Set the module stop bit to halt the clock. */
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
		     clk->enable_reg);
}

static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable		= sh_clk_mstp32_enable,
	.disable	= sh_clk_mstp32_disable,
	.recalc		= followparent_recalc,
};

int __init sh_clk_mstp32_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp32_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	/* The divide ratio lives in the low six bits of the register. */
	idx = __raw_readl(clk->enable_reg) & 0x003f;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div6_set_rate(struct clk *clk,
				unsigned long rate, int algo_id)
{
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = __raw_readl(clk->enable_reg);
	value &= ~0x3f;
	value |= idx;
	__raw_writel(value, clk->enable_reg);
	return 0;
}

static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
	if (ret == 0) {
		value = __raw_readl(clk->enable_reg);
		value &= ~0x100; /* clear stop bit to enable clock */
		__raw_writel(value, clk->enable_reg);
	}
	return ret;
}

static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = __raw_readl(clk->enable_reg);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	__raw_writel(value, clk->enable_reg);
}

static struct clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = sh_clk_div6_table.nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}
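
	/*
	 * Carve the shared allocation into one frequency table slice
	 * per clock; each slice is terminated with CPUFREQ_TABLE_END
	 * so the rate lookup helpers know where it ends.
	 */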
	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = &sh_clk_div6_clk_ops;
		clkp->id = -1;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	/* Bit 7 selects the source: 0 for pll_clk, 1 for the other parent. */
	if (!strcmp("pll_clk", parent->name))
		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
	else
		value = __raw_readl(clk->enable_reg) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	__raw_writel(value, clk->enable_reg);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
{
	struct clk_div4_table *d4t = clk->priv;
	unsigned long value;
	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = __raw_readl(clk->enable_reg);
	value &= ~(0xf << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	__raw_writel(value, clk->enable_reg);

	/* Give SoC-specific code a chance to kick the transition if needed. */
	if (d4t->kick)
		d4t->kick(clk);

	return 0;
}

static int sh_clk_div4_enable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
	return 0;
}

static void sh_clk_div4_disable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
}

static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};

static struct clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
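
/*
 * All three DIV4 flavours share the registration helper below; they
 * differ only in the clk_ops they pass in. The plain ops cannot be
 * gated, the "enable" ops add gate control through bit 8, and the
 * "reparent" ops additionally allow switching the parent clock.
 */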
static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div4_table *table, struct clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->id = -1;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}

#ifdef CONFIG_SH_CLK_CPG_LEGACY
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk peripheral_clk = {
	.name		= "peripheral_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};

int __init __deprecated cpg_clk_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];
		arch_init_clk_ops(&clk->ops, i);
		if (clk->ops)
			ret |= clk_register(clk);
	}

	return ret;
}

/*
 * Placeholder for compatibility, until the lazy CPUs do this
 * on their own.
 */
int __init __weak arch_clk_init(void)
{
	return cpg_clk_init();
}
#endif /* CONFIG_SH_CLK_CPG_LEGACY */
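
/*
 * Example usage (illustrative sketch only): a CPU support file would
 * describe its module-stop clocks as an array of struct clk and hand
 * them to sh_clk_mstp32_register() from its arch_clk_init(). The
 * clock name, register address and bit number below are hypothetical
 * placeholders, not values taken from any real SoC:
 *
 *	static struct clk mstp_clks[] = {
 *		{
 *			.name		= "tmu0",
 *			.enable_reg	= (void __iomem *)0xa4150030,
 *			.enable_bit	= 15,
 *		},
 *	};
 *
 *	return sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */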