// SPDX-License-Identifier: GPL-2.0
/*
 * Zynq UltraScale+ MPSoC Divider support
 *
 * Copyright (C) 2016-2019 Xilinx
 *
 * Adjustable divider clock implementation
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include "clk-zynqmp.h"

/*
 * DOC: basic adjustable divider clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable. clk->rate = ceiling(parent->rate / divisor)
 * parent - fixed parent. No clk_set_parent support
 */

#define to_zynqmp_clk_divider(_hw)		\
	container_of(_hw, struct zynqmp_clk_divider, hw)

#define CLK_FRAC		BIT(13) /* has a fractional parent */
#define CUSTOM_FLAG_CLK_FRAC	BIT(0) /* has a fractional parent in custom type flag */

/**
 * struct zynqmp_clk_divider - adjustable divider clock
 * @hw:		handle between common and hardware-specific interfaces
 * @flags:	Hardware specific flags
 * @is_frac:	The divider is a fractional divider
 * @clk_id:	Id of clock
 * @div_type:	divisor type (TYPE_DIV1 or TYPE_DIV2)
 * @max_div:	maximum supported divisor (fetched from firmware)
 */
struct zynqmp_clk_divider {
	struct clk_hw hw;
	u8 flags;
	bool is_frac;
	u32 clk_id;
	u32 div_type;
	u16 max_div;
};

static inline int zynqmp_divider_get_val(unsigned long parent_rate,
					 unsigned long rate, u16 flags)
{
	int up, down;
	unsigned long up_rate, down_rate;

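	/*
	 * For power-of-two dividers, round the ideal divisor both up and
	 * down to a power of two, compute the rate each would produce and
	 * return whichever divisor gives a rate closer to the requested
	 * one (ties go to the larger divisor, i.e. the lower rate).
	 */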
	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
		up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
		down = DIV_ROUND_DOWN_ULL((u64)parent_rate, rate);

		up = __roundup_pow_of_two(up);
		down = __rounddown_pow_of_two(down);

		up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
		down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);

		return (rate - up_rate) <= (down_rate - rate) ? up : down;

	} else {
		return DIV_ROUND_CLOSEST(parent_rate, rate);
	}
}

/**
 * zynqmp_clk_divider_recalc_rate() - Recalc rate of divider clock
 * @hw:			handle between common and hardware-specific interfaces
 * @parent_rate:	rate of parent clock
 *
 * Return: Current rate of the clock, computed from the divider value read
 * back from firmware
 */
static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
						    unsigned long parent_rate)
{
	struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
	const char *clk_name = clk_hw_get_name(hw);
	u32 clk_id = divider->clk_id;
	u32 div_type = divider->div_type;
	u32 div, value;
	int ret;

	ret = zynqmp_pm_clock_getdivider(clk_id, &div);

	if (ret)
		pr_debug("%s() get divider failed for %s, ret = %d\n",
			 __func__, clk_name, ret);

	if (div_type == TYPE_DIV1)
		value = div & 0xFFFF;
	else
		value = div >> 16;

	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
		value = 1 << value;

	if (!value) {
		WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
		     "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
		     clk_name);
		return parent_rate;
	}

	return DIV_ROUND_UP_ULL(parent_rate, value);
}

/**
 * zynqmp_clk_divider_determine_rate() - Determine rate of divider clock
 * @hw:		handle between common and hardware-specific interfaces
 * @req:	rate request to be adjusted
 *
 * Return: 0 on success else error+reason
 */
static int zynqmp_clk_divider_determine_rate(struct clk_hw *hw,
					     struct clk_rate_request *req)
{
	struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
	const char *clk_name = clk_hw_get_name(hw);
	u32 clk_id = divider->clk_id;
	u32 div_type = divider->div_type;
	u32 bestdiv;
	int ret;
	u8 width;

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		ret = zynqmp_pm_clock_getdivider(clk_id, &bestdiv);

		if (ret)
			pr_debug("%s() get divider failed for %s, ret = %d\n",
				 __func__, clk_name, ret);
		if (div_type == TYPE_DIV1)
			bestdiv = bestdiv & 0xFFFF;
		else
			bestdiv = bestdiv >> 16;

		if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
			bestdiv = 1 << bestdiv;

		req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate,
					     bestdiv);

		return 0;
	}

	width = fls(divider->max_div);

	req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
				       NULL, width, divider->flags);

	if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
	    (req->rate % req->best_parent_rate))
		req->best_parent_rate = req->rate;

	return 0;
}

/**
 * zynqmp_clk_divider_set_rate() - Set rate of divider clock
 * @hw:			handle between common and hardware-specific interfaces
 * @rate:		rate of clock to be set
 * @parent_rate:	rate of parent clock
 *
 * Return: 0 on success else error+reason
 */
static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
	const char *clk_name = clk_hw_get_name(hw);
	u32 clk_id = divider->clk_id;
	u32 div_type = divider->div_type;
	u32 value, div;
	int ret;

	value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
	if (div_type == TYPE_DIV1) {
		div = value & 0xFFFF;
		div |= 0xffff << 16;
	} else {
		div = 0xffff;
		div |= value << 16;
	}

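	/*
	 * CLK_DIVIDER_POWER_OF_TWO dividers store log2(divisor) in the
	 * register field (recalc_rate decodes it with '1 << value'), so
	 * convert the value before handing it to the firmware.
	 */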
	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
		div = __ffs(div);

	ret = zynqmp_pm_clock_setdivider(clk_id, div);

	if (ret)
		pr_debug("%s() set divider failed for %s, ret = %d\n",
			 __func__, clk_name, ret);

	return ret;
}

static const struct clk_ops zynqmp_clk_divider_ops = {
	.recalc_rate = zynqmp_clk_divider_recalc_rate,
	.determine_rate = zynqmp_clk_divider_determine_rate,
	.set_rate = zynqmp_clk_divider_set_rate,
};

static const struct clk_ops zynqmp_clk_divider_ro_ops = {
	.recalc_rate = zynqmp_clk_divider_recalc_rate,
	.determine_rate = zynqmp_clk_divider_determine_rate,
};

/**
 * zynqmp_clk_get_max_divisor() - Get maximum supported divisor from firmware.
 * @clk_id:	Id of clock
 * @type:	Divider type
 *
 * Return: Maximum divisor of the clock if the query succeeds, or U16_MAX if
 * the query fails
 */
static u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
{
	struct zynqmp_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
	qdata.arg1 = clk_id;
	qdata.arg2 = type;
	ret = zynqmp_pm_query_data(qdata, ret_payload);
	/*
	 * To maintain backward compatibility return the maximum possible
	 * value (0xFFFF) if the query for the max divisor is not successful.
	 */
	if (ret)
		return U16_MAX;

	return ret_payload[1];
}

static inline unsigned long zynqmp_clk_map_divider_ccf_flags(
				       const u32 zynqmp_type_flag)
{
	unsigned long ccf_flag = 0;

	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_ONE_BASED)
		ccf_flag |= CLK_DIVIDER_ONE_BASED;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_POWER_OF_TWO)
		ccf_flag |= CLK_DIVIDER_POWER_OF_TWO;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_ALLOW_ZERO)
		ccf_flag |= CLK_DIVIDER_ALLOW_ZERO;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_HIWORD_MASK)
		ccf_flag |= CLK_DIVIDER_HIWORD_MASK;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_ROUND_CLOSEST)
		ccf_flag |= CLK_DIVIDER_ROUND_CLOSEST;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_READ_ONLY)
		ccf_flag |= CLK_DIVIDER_READ_ONLY;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_MAX_AT_ZERO)
		ccf_flag |= CLK_DIVIDER_MAX_AT_ZERO;

	return ccf_flag;
}

/**
 * zynqmp_clk_register_divider() - Register a divider clock
 * @name:		Name of this clock
 * @clk_id:		Id of clock
 * @parents:		Name of this clock's parents
 * @num_parents:	Number of parents
 * @nodes:		Clock topology node
 *
 * Return: clock hardware of the registered clock divider, or an ERR_PTR on
 * failure
 */
struct clk_hw *zynqmp_clk_register_divider(const char *name,
					   u32 clk_id,
					   const char * const *parents,
					   u8 num_parents,
					   const struct clock_topology *nodes)
{
	struct zynqmp_clk_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	/* allocate the divider */
	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (nodes->type_flag & CLK_DIVIDER_READ_ONLY)
		init.ops = &zynqmp_clk_divider_ro_ops;
	else
		init.ops = &zynqmp_clk_divider_ops;

	init.flags = zynqmp_clk_map_common_ccf_flags(nodes->flag);

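	/*
	 * The divider has a fixed parent (no clk_set_parent support, see
	 * the DOC comment above), so only the first entry of @parents is
	 * used.
	 */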
	init.parent_names = parents;
	init.num_parents = 1;

	/* struct clk_divider assignments */
	div->is_frac = !!((nodes->flag & CLK_FRAC) |
			  (nodes->custom_type_flag & CUSTOM_FLAG_CLK_FRAC));
	div->flags = zynqmp_clk_map_divider_ccf_flags(nodes->type_flag);
	div->hw.init = &init;
	div->clk_id = clk_id;
	div->div_type = nodes->type;

	/*
	 * The maximum divider limit is needed during rate computation to
	 * achieve the best possible rate.
	 */
	div->max_div = zynqmp_clk_get_max_divisor(clk_id, nodes->type);

	hw = &div->hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(div);
		hw = ERR_PTR(ret);
	}

	return hw;
}
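
/*
 * Illustrative usage (not part of this file): the ZynqMP clock provider is
 * expected to register each firmware-described divider node through the
 * helper above, roughly as sketched below. The variable names (clk_name,
 * clk_id, parent_names, num_parents, divider_node) are placeholders.
 *
 *	struct clk_hw *hw;
 *
 *	hw = zynqmp_clk_register_divider(clk_name, clk_id, parent_names,
 *					 num_parents, divider_node);
 *	if (IS_ERR(hw))
 *		pr_warn("divider %s registration failed\n", clk_name);
 */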