// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Atmel Corporation,
 *		      Nicolas Ferre <nicolas.ferre@atmel.com>
 *
 * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
 */

#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pmc.h"

#define GENERATED_MAX_DIV	255

struct clk_generated {
	struct clk_hw hw;
	struct regmap *regmap;
	struct clk_range range;
	spinlock_t *lock;
	u32 *mux_table;
	u32 id;
	u32 gckdiv;
	const struct clk_pcr_layout *layout;
	struct at91_clk_pms pms;
	u8 parent_id;
	int chg_pid;
};

#define to_clk_generated(hw) \
	container_of(hw, struct clk_generated, hw)

static int clk_generated_set(struct clk_generated *gck, int status)
{
	unsigned long flags;
	unsigned int enable = status ? AT91_PMC_PCR_GCKEN : 0;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
			   gck->layout->cmd | enable,
			   field_prep(gck->layout->gckcss_mask, gck->parent_id) |
			   gck->layout->cmd |
			   FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
			   enable);
	spin_unlock_irqrestore(gck->lock, flags);

	return 0;
}

static int clk_generated_enable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
		 __func__, gck->gckdiv, gck->parent_id);

	clk_generated_set(gck, 1);

	return 0;
}

static void clk_generated_disable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   gck->layout->cmd | AT91_PMC_PCR_GCKEN,
			   gck->layout->cmd);
	spin_unlock_irqrestore(gck->lock, flags);
}

static int clk_generated_is_enabled(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;
	unsigned int status;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &status);
	spin_unlock_irqrestore(gck->lock, flags);

	return !!(status & AT91_PMC_PCR_GCKEN);
}

static unsigned long
clk_generated_recalc_rate(struct clk_hw *hw,
			  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
}

static void clk_generated_best_diff(struct clk_rate_request *req,
				    struct clk_hw *parent,
				    unsigned long parent_rate, u32 div,
				    int *best_diff, long *best_rate)
{
	unsigned long tmp_rate;
	int tmp_diff;

	if (!div)
		tmp_rate = parent_rate;
	else
		tmp_rate = parent_rate / div;

	if (tmp_rate < req->min_rate || tmp_rate > req->max_rate)
		return;

	tmp_diff = abs(req->rate - tmp_rate);

	if (*best_diff < 0 || *best_diff >= tmp_diff) {
		*best_rate = tmp_rate;
		*best_diff = tmp_diff;
		req->best_parent_rate = parent_rate;
		req->best_parent_hw = parent;
	}
}

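/*
 * Pick the (parent, divisor) pair whose output is closest to req->rate.
 * Fixed-rate parents are each probed with a single round-to-closest
 * divisor; the rate-changeable parent (chg_pid), if any, is handled
 * afterwards by asking it for a matching rate at every divisor. An exact
 * match (best_diff == 0) stops the search early.
 */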
static int clk_generated_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_generated *gck = to_clk_generated(hw);
	struct clk_hw *parent = NULL;
	long best_rate = -EINVAL;
	unsigned long min_rate, parent_rate;
	int best_diff = -1;
	int i;
	u32 div;

	/* do not look for a rate that is outside of our range */
	if (gck->range.max && req->rate > gck->range.max)
		req->rate = gck->range.max;
	if (gck->range.min && req->rate < gck->range.min)
		req->rate = gck->range.min;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		if (gck->chg_pid == i)
			continue;

		parent = clk_hw_get_parent_by_index(hw, i);
		if (!parent)
			continue;

		parent_rate = clk_hw_get_rate(parent);
		min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
		if (!parent_rate ||
		    (gck->range.max && min_rate > gck->range.max))
			continue;

		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
		if (div > GENERATED_MAX_DIV + 1)
			div = GENERATED_MAX_DIV + 1;

		clk_generated_best_diff(req, parent, parent_rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

	/*
	 * The audio_pll rate can be modified, unlike the five other clocks
	 * that should never be altered.
	 * The audio_pll can technically be used by multiple consumers. However,
	 * with the rate locking, the first consumer to enable the clock will be
	 * the one definitely setting the rate of the clock.
	 * Since audio IPs are most likely to request the same rate, we enforce
	 * that the only clks able to modify the gck rate are those of audio IPs.
	 */

	if (gck->chg_pid < 0)
		goto end;

	parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
	if (!parent)
		goto end;

	for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
		struct clk_rate_request req_parent;

		clk_hw_forward_rate_request(hw, req, parent, &req_parent,
					    req->rate * div);
		if (__clk_determine_rate(parent, &req_parent))
			continue;
		clk_generated_best_diff(req, parent, req_parent.rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

end:
	pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
		 __func__, best_rate,
		 __clk_get_name((req->best_parent_hw)->clk),
		 req->best_parent_rate);

	if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max))
		return -EINVAL;

	req->rate = best_rate;
	return 0;
}

/* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_generated *gck = to_clk_generated(hw);

	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	if (gck->mux_table)
		gck->parent_id = clk_mux_index_to_val(gck->mux_table, 0, index);
	else
		gck->parent_id = index;

	return 0;
}

static u8 clk_generated_get_parent(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return gck->parent_id;
}

/* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
static int clk_generated_set_rate(struct clk_hw *hw,
				  unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);
	u32 div;

	if (!rate)
		return -EINVAL;

	if (gck->range.max && rate > gck->range.max)
		return -EINVAL;

	div = DIV_ROUND_CLOSEST(parent_rate, rate);
	if (div > GENERATED_MAX_DIV + 1 || !div)
		return -EINVAL;

	gck->gckdiv = div - 1;
	return 0;
}

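/*
 * Only the enable state needs saving across suspend: parent_id and gckdiv
 * are already cached in struct clk_generated by set_parent()/set_rate(),
 * so restore_context() can rewrite the whole PCR from those cached values
 * via clk_generated_set().
 */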
static int clk_generated_save_context(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	gck->pms.status = clk_generated_is_enabled(&gck->hw);

	return 0;
}

static void clk_generated_restore_context(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	if (gck->pms.status)
		clk_generated_set(gck, gck->pms.status);
}

static const struct clk_ops generated_ops = {
	.enable = clk_generated_enable,
	.disable = clk_generated_disable,
	.is_enabled = clk_generated_is_enabled,
	.recalc_rate = clk_generated_recalc_rate,
	.determine_rate = clk_generated_determine_rate,
	.get_parent = clk_generated_get_parent,
	.set_parent = clk_generated_set_parent,
	.set_rate = clk_generated_set_rate,
	.save_context = clk_generated_save_context,
	.restore_context = clk_generated_restore_context,
};

/**
 * clk_generated_startup - Initialize a given clock to its default parent and
 * divisor parameter.
 *
 * @gck:	Generated clock to set the startup parameters for.
 *
 * Take parameters from the hardware and update local clock configuration
 * accordingly.
 */
static void clk_generated_startup(struct clk_generated *gck)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &tmp);
	spin_unlock_irqrestore(gck->lock, flags);

	gck->parent_id = field_get(gck->layout->gckcss_mask, tmp);
	gck->gckdiv = FIELD_GET(AT91_PMC_PCR_GCKDIV_MASK, tmp);
}

struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
			    const struct clk_pcr_layout *layout,
			    const char *name, const char **parent_names,
			    u32 *mux_table, u8 num_parents, u8 id,
			    const struct clk_range *range,
			    int chg_pid)
{
	struct clk_generated *gck;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	gck = kzalloc(sizeof(*gck), GFP_KERNEL);
	if (!gck)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &generated_ops;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
	if (chg_pid >= 0)
		init.flags |= CLK_SET_RATE_PARENT;

	gck->id = id;
	gck->hw.init = &init;
	gck->regmap = regmap;
	gck->lock = lock;
	gck->range = *range;
	gck->chg_pid = chg_pid;
	gck->layout = layout;
	gck->mux_table = mux_table;

	clk_generated_startup(gck);
	hw = &gck->hw;
	ret = clk_hw_register(NULL, &gck->hw);
	if (ret) {
		kfree(gck);
		hw = ERR_PTR(ret);
	}

	return hw;
}
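
/*
 * Usage sketch (illustrative, not part of this file): a SoC PMC driver
 * typically registers one generated clock per peripheral ID. The names and
 * values below are hypothetical:
 *
 *	hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, &pcr_layout,
 *					 "i2s0_gclk", parent_names, NULL,
 *					 6, 54, &range, -1);
 *
 * A negative chg_pid marks the clock as unable to re-rate any parent;
 * passing a valid parent index instead (the audio PLL case) also sets
 * CLK_SET_RATE_PARENT on the registered clock.
 */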