// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller Platform bus based glue driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/unipro.h>

#define UFSHCD_DEFAULT_LANES_PER_DIRECTION	2

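/**
 * ufshcd_parse_clock_info - extract clock names and frequency limits from DT
 * @hba: per adapter instance
 *
 * Illustrative device-tree fragment this parser expects (a sketch only;
 * the clock names, phandles and frequencies are platform specific):
 *
 *	clocks = <&gcc GCC_UFS_AXI_CLK>, <&gcc GCC_UFS_AHB_CLK>;
 *	clock-names = "core_clk", "iface_clk";
 *	freq-table-hz = <100000000 200000000>, <0 0>;
 *
 * Each "clock-names" entry is paired with a <min max> couple from
 * "freq-table-hz", so the frequency table must hold exactly twice as many
 * u32 cells as there are clock names.
 *
 * Return: 0 on success (including when no clocks are described), negative
 * errno on failure.
 */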
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
	int ret = 0;
	int cnt;
	int i;
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	const char *name;
	u32 *clkfreq = NULL;
	struct ufs_clk_info *clki;
	ssize_t sz = 0;

	if (!np)
		goto out;

	cnt = of_property_count_strings(np, "clock-names");
	if (!cnt || (cnt == -EINVAL)) {
		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
				__func__);
	} else if (cnt < 0) {
		dev_err(dev, "%s: count clock strings failed, err %d\n",
				__func__, cnt);
		ret = cnt;
	}

	if (cnt <= 0)
		goto out;

	sz = of_property_count_u32_elems(np, "freq-table-hz");
	if (sz <= 0) {
		dev_info(dev, "freq-table-hz property not specified\n");
		goto out;
	}

	if (sz != 2 * cnt) {
		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
		ret = -EINVAL;
		goto out;
	}

	clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
			       GFP_KERNEL);
	if (!clkfreq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = of_property_read_u32_array(np, "freq-table-hz",
			clkfreq, sz);
	if (ret && (ret != -EINVAL)) {
		dev_err(dev, "%s: error reading array %d\n",
				"freq-table-hz", ret);
		return ret;
	}

	for (i = 0; i < sz; i += 2) {
		ret = of_property_read_string_index(np, "clock-names", i/2,
						    &name);
		if (ret)
			goto out;

		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
		if (!clki) {
			ret = -ENOMEM;
			goto out;
		}

		clki->min_freq = clkfreq[i];
		clki->max_freq = clkfreq[i+1];
		clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
		if (!clki->name) {
			ret = -ENOMEM;
			goto out;
		}

		if (!strcmp(name, "ref_clk"))
			clki->keep_link_active = true;
		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
				clki->min_freq, clki->max_freq, clki->name);
		list_add_tail(&clki->list, &hba->clk_list_head);
	}
out:
	return ret;
}

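/**
 * phandle_exists - determine whether a phandle property can be resolved
 * @np: device node to check
 * @phandle_name: name of the phandle property
 * @index: index of the phandle to look up
 *
 * Return: true if the phandle can be parsed, false otherwise. The reference
 * taken by of_parse_phandle() is dropped before returning.
 */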
static bool phandle_exists(const struct device_node *np,
			   const char *phandle_name, int index)
{
	struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);

	if (parse_np)
		of_node_put(parse_np);

	return parse_np != NULL;
}

#define MAX_PROP_SIZE 32
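/**
 * ufshcd_populate_vreg - allocate and fill a UFS regulator descriptor from DT
 * @dev: pointer to the device
 * @name: name of the regulator, e.g. "vcc"
 * @out_vreg: out parameter for the populated (or NULL) descriptor
 * @skip_current: if true, do not look up the "<name>-max-microamp" limit
 *
 * Illustrative device-tree fragment (a sketch only; the regulator phandle
 * and current limit are platform specific):
 *
 *	vcc-supply = <&ufs_vcc_reg>;
 *	vcc-max-microamp = <500000>;
 *
 * If the "<name>-supply" phandle is absent, the supply is assumed to be
 * always-on and *out_vreg is left NULL.
 *
 * Return: 0 on success or if the supply is not described, -ENOMEM on
 * allocation failure.
 */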
int ufshcd_populate_vreg(struct device *dev, const char *name,
			 struct ufs_vreg **out_vreg, bool skip_current)
{
	char prop_name[MAX_PROP_SIZE];
	struct ufs_vreg *vreg = NULL;
	struct device_node *np = dev->of_node;

	if (!np) {
		dev_err(dev, "%s: non DT initialization\n", __func__);
		goto out;
	}

	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
	if (!phandle_exists(np, prop_name, 0)) {
		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
			 __func__, prop_name);
		goto out;
	}

	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
	if (!vreg)
		return -ENOMEM;

	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!vreg->name)
		return -ENOMEM;

	if (skip_current) {
		vreg->max_uA = 0;
		goto out;
	}

	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
		dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
		vreg->max_uA = 0;
	}
out:
	*out_vreg = vreg;
	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);

/**
 * ufshcd_parse_regulator_info - get regulator info from device tree
 * @hba: per adapter instance
 *
 * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
 * If any of the supplies is not defined, it is assumed to be always-on and
 * zero is returned. If a property is defined but parsing fails, the
 * corresponding error is returned.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct ufs_vreg_info *info = &hba->vreg_info;

	err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba, true);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vcc", &info->vcc, false);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vccq", &info->vccq, false);
	if (err)
		goto out;

	err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2, false);
out:
	return err;
}

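/**
 * ufshcd_init_lanes_per_dir - read the number of lanes per link direction
 * @hba: per adapter instance
 *
 * Parses the optional "lanes-per-direction" DT property, e.g.:
 *
 *	lanes-per-direction = <2>;
 *
 * Falls back to UFSHCD_DEFAULT_LANES_PER_DIRECTION when the property is
 * absent.
 */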
static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	int ret;

	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
				   &hba->lanes_per_direction);
	if (ret) {
		dev_dbg(hba->dev,
			"%s: failed to read lanes-per-direction, ret=%d\n",
			__func__, ret);
		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
	}
}

/**
 * ufshcd_parse_clock_min_max_freq - Parse MIN and MAX clocks freq
 * @hba: per adapter instance
 *
 * This function parses the MIN and MAX frequencies of all clocks required
 * by the host driver.
 *
 * Return: 0 for success and non-zero for failure.
 */
static int ufshcd_parse_clock_min_max_freq(struct ufs_hba *hba)
{
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct dev_pm_opp *opp;
	unsigned long freq;
	u8 idx = 0;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(hba->dev, clki->name);
		if (IS_ERR(clki->clk))
			continue;

		/* Find Max Freq */
		freq = ULONG_MAX;
		opp = dev_pm_opp_find_freq_floor_indexed(hba->dev, &freq, idx);
		if (IS_ERR(opp)) {
			dev_err(hba->dev, "Failed to find OPP for MAX frequency\n");
			return PTR_ERR(opp);
		}
		clki->max_freq = dev_pm_opp_get_freq_indexed(opp, idx);
		dev_pm_opp_put(opp);

		/* Find Min Freq */
		freq = 0;
		opp = dev_pm_opp_find_freq_ceil_indexed(hba->dev, &freq, idx);
		if (IS_ERR(opp)) {
			dev_err(hba->dev, "Failed to find OPP for MIN frequency\n");
			return PTR_ERR(opp);
		}
		clki->min_freq = dev_pm_opp_get_freq_indexed(opp, idx++);
		dev_pm_opp_put(opp);
	}

	return 0;
}

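/**
 * ufshcd_parse_operating_points - parse the operating-points-v2 table, if any
 * @hba: per adapter instance
 *
 * Illustrative device-tree fragment (a sketch only; clock names, frequencies
 * and the number of opp-hz entries are platform specific, with one entry per
 * clock listed in "clock-names"):
 *
 *	clock-names = "core_clk", "iface_clk";
 *	operating-points-v2 = <&ufs_opp_table>;
 *
 *	ufs_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *		opp-100000000 {
 *			opp-hz = /bits/ 64 <100000000>, /bits/ 64 <0>;
 *		};
 *		opp-200000000 {
 *			opp-hz = /bits/ 64 <200000000>, /bits/ 64 <0>;
 *		};
 *	};
 *
 * "operating-points-v2" and the legacy "freq-table-hz" are mutually
 * exclusive. When an OPP table is present, hba->use_pm_opp is set and the
 * per-clock min/max frequencies are derived from the table.
 *
 * Return: 0 on success (including when no OPP table is described), negative
 * errno on failure.
 */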
static int ufshcd_parse_operating_points(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	struct dev_pm_opp_config config = {};
	struct ufs_clk_info *clki;
	const char **clk_names;
	int cnt, i, ret;

	if (!of_property_present(np, "operating-points-v2"))
		return 0;

	if (of_property_present(np, "freq-table-hz")) {
		dev_err(dev, "%s: operating-points and freq-table-hz are incompatible\n",
			__func__);
		return -EINVAL;
	}

	cnt = of_property_count_strings(np, "clock-names");
	if (cnt <= 0) {
		dev_err(dev, "%s: Missing clock-names\n", __func__);
		return -ENODEV;
	}

	/* OPP expects clk_names to be NULL terminated */
	clk_names = devm_kcalloc(dev, cnt + 1, sizeof(*clk_names), GFP_KERNEL);
	if (!clk_names)
		return -ENOMEM;

	/*
	 * We still need to get reference to all clocks as the UFS core uses
	 * them separately.
	 */
	for (i = 0; i < cnt; i++) {
		ret = of_property_read_string_index(np, "clock-names", i,
						    &clk_names[i]);
		if (ret)
			return ret;

		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
		if (!clki)
			return -ENOMEM;

		clki->name = devm_kstrdup(dev, clk_names[i], GFP_KERNEL);
		if (!clki->name)
			return -ENOMEM;

		if (!strcmp(clk_names[i], "ref_clk"))
			clki->keep_link_active = true;

		list_add_tail(&clki->list, &hba->clk_list_head);
	}

	config.clk_names = clk_names;
	config.config_clks = ufshcd_opp_config_clks;

	ret = devm_pm_opp_set_config(dev, &config);
	if (ret)
		return ret;

	ret = devm_pm_opp_of_add_table(dev);
	if (ret) {
		dev_err(dev, "Failed to add OPP table: %d\n", ret);
		return ret;
	}

	ret = ufshcd_parse_clock_min_max_freq(hba);
	if (ret)
		return ret;

	hba->use_pm_opp = true;

	return 0;
}

/**
 * ufshcd_negotiate_pwr_params - find power mode settings that are supported by
 *				 both the controller and the device
 * @host_params: pointer to host parameters
 * @dev_max: pointer to device attributes
 * @agreed_pwr: returned agreed attributes
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
				const struct ufs_pa_layer_attr *dev_max,
				struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_host_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_host_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (host_params->desired_working_mode == UFS_HS_MODE) {
		is_host_max_hs = true;
		min_host_gear = min_t(u32, host_params->hs_rx_gear,
				      host_params->hs_tx_gear);
	} else {
		min_host_gear = min_t(u32, host_params->pwm_rx_gear,
				      host_params->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but host_params->desired_working_mode is HS,
	 * thus device and host_params don't agree
	 */
	if (!is_dev_sup_hs && is_host_max_hs) {
		pr_info("%s: device doesn't support HS\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_host_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since host_params->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to host_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = host_params->rx_pwr_hs;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	} else {
		/*
		 * here host_params->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases host_params->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = host_params->rx_pwr_pwm;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    host_params->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    host_params->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * If both device capabilities and vendor pre-defined preferences are
	 * HS, or both are PWM, then set the minimum of the two gears as the
	 * chosen working gear.
	 * If one is PWM and one is HS, then the PWM side gets to decide the
	 * gear, as it is the one that also decided previously what power mode
	 * the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_host_max_hs) ||
	    (!is_dev_sup_hs && !is_host_max_hs)) {
		agreed_pwr->gear_rx =
			min_t(u32, min_dev_gear, min_host_gear);
	} else if (!is_dev_sup_hs) {
		agreed_pwr->gear_rx = min_dev_gear;
	} else {
		agreed_pwr->gear_rx = min_host_gear;
	}
	agreed_pwr->gear_tx = agreed_pwr->gear_rx;

	agreed_pwr->hs_rate = host_params->hs_rate;

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_negotiate_pwr_params);

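/**
 * ufshcd_init_host_params - initialize host parameters to their default values
 * @host_params: pointer to the host parameters structure to initialize
 *
 * Fills @host_params with the defaults used as a starting point for power
 * mode negotiation: two lanes per direction, HS-G3 / PWM-G4 gear limits,
 * FAST/SLOW power modes, rate B and a desired working mode of HS. Vendor
 * drivers typically override individual fields before calling
 * ufshcd_negotiate_pwr_params().
 */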
void ufshcd_init_host_params(struct ufs_host_params *host_params)
{
	*host_params = (struct ufs_host_params){
		.tx_lanes = UFS_LANE_2,
		.rx_lanes = UFS_LANE_2,
		.hs_rx_gear = UFS_HS_G3,
		.hs_tx_gear = UFS_HS_G3,
		.pwm_rx_gear = UFS_PWM_G4,
		.pwm_tx_gear = UFS_PWM_G4,
		.rx_pwr_pwm = SLOW_MODE,
		.tx_pwr_pwm = SLOW_MODE,
		.rx_pwr_hs = FAST_MODE,
		.tx_pwr_hs = FAST_MODE,
		.hs_rate = PA_HS_MODE_B,
		.desired_working_mode = UFS_HS_MODE,
	};
}
EXPORT_SYMBOL_GPL(ufshcd_init_host_params);
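/*
 * Typical usage from a vendor driver's power mode change hook (a sketch only;
 * "dev_max_params" and "dev_req_params" stand for the attribute pointers
 * passed to the vendor callback, and the gear override shown is purely
 * illustrative):
 *
 *	struct ufs_host_params host_params;
 *	int ret;
 *
 *	ufshcd_init_host_params(&host_params);
 *	host_params.hs_rx_gear = UFS_HS_G4;
 *	host_params.hs_tx_gear = UFS_HS_G4;
 *
 *	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params,
 *					  dev_req_params);
 *	if (ret)
 *		dev_err(hba->dev, "%s: failed to negotiate pwr params %d\n",
 *			__func__, ret);
 */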

/**
 * ufshcd_pltfrm_init - probe routine of the driver
 * @pdev: pointer to Platform device handle
 * @vops: pointer to variant ops
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_pltfrm_init(struct platform_device *pdev,
		       const struct ufs_hba_variant_ops *vops)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int irq, err;
	struct device *dev = &pdev->dev;

	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base)) {
		err = PTR_ERR(mmio_base);
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto out;
	}

	err = ufshcd_alloc_host(dev, &hba);
	if (err) {
		dev_err(dev, "Allocation failed\n");
		goto out;
	}

	hba->vops = vops;

	err = ufshcd_parse_clock_info(hba);
	if (err) {
		dev_err(dev, "%s: clock parse failed %d\n",
			__func__, err);
		goto dealloc_host;
	}
	err = ufshcd_parse_regulator_info(hba);
	if (err) {
		dev_err(dev, "%s: regulator init failed %d\n",
			__func__, err);
		goto dealloc_host;
	}

	ufshcd_init_lanes_per_dir(hba);

	err = ufshcd_parse_operating_points(hba);
	if (err) {
		dev_err(dev, "%s: OPP parse failed %d\n", __func__, err);
		goto dealloc_host;
	}

	err = ufshcd_init(hba, mmio_base, irq);
	if (err) {
		dev_err_probe(dev, err, "Initialization failed with error %d\n",
			      err);
		goto dealloc_host;
	}

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;

dealloc_host:
	ufshcd_dealloc_host(hba);
out:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);

/**
 * ufshcd_pltfrm_remove - Remove ufshcd platform
 * @pdev: pointer to Platform device handle
 */
void ufshcd_pltfrm_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_remove);

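/*
 * Typical shape of a vendor glue driver built on top of this file (a sketch
 * only; the "ufs_foo" names and the variant ops table are hypothetical, and
 * PM ops, of_match_table and the like are omitted):
 *
 *	static int ufs_foo_probe(struct platform_device *pdev)
 *	{
 *		return ufshcd_pltfrm_init(pdev, &ufs_foo_hba_vops);
 *	}
 *
 *	static void ufs_foo_remove(struct platform_device *pdev)
 *	{
 *		ufshcd_pltfrm_remove(pdev);
 *	}
 *
 *	static struct platform_driver ufs_foo_driver = {
 *		.probe = ufs_foo_probe,
 *		.remove = ufs_foo_remove,
 *		.driver = {
 *			.name = "ufshcd-foo",
 *		},
 *	};
 */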
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
MODULE_LICENSE("GPL");