// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Sunrisepoint LPSS core support.
 *
 * Copyright (C) 2015, Intel Corporation
 *
 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Heikki Krogerus <heikki.krogerus@linux.intel.com>
 *          Jarkko Nikula <jarkko.nikula@linux.intel.com>
 */

#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sprintf.h>
#include <linux/types.h>

#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/dma/idma64.h>

#include "intel-lpss.h"

struct dentry;

#define LPSS_DEV_OFFSET		0x000
#define LPSS_DEV_SIZE		0x200
#define LPSS_PRIV_OFFSET	0x200
#define LPSS_PRIV_SIZE		0x100
#define LPSS_PRIV_REG_COUNT	(LPSS_PRIV_SIZE / 4)
#define LPSS_IDMA64_OFFSET	0x800
#define LPSS_IDMA64_SIZE	0x800

/* Offsets from lpss->priv */
#define LPSS_PRIV_RESETS		0x04
#define LPSS_PRIV_RESETS_IDMA		BIT(2)
#define LPSS_PRIV_RESETS_FUNC		0x3

#define LPSS_PRIV_ACTIVELTR		0x10
#define LPSS_PRIV_IDLELTR		0x14

#define LPSS_PRIV_LTR_REQ		BIT(15)
#define LPSS_PRIV_LTR_SCALE_MASK	GENMASK(11, 10)
#define LPSS_PRIV_LTR_SCALE_1US		(2 << 10)
#define LPSS_PRIV_LTR_SCALE_32US	(3 << 10)
#define LPSS_PRIV_LTR_VALUE_MASK	GENMASK(9, 0)

#define LPSS_PRIV_SSP_REG		0x20
#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN	BIT(0)

#define LPSS_PRIV_REMAP_ADDR		0x40

#define LPSS_PRIV_CAPS			0xfc
#define LPSS_PRIV_CAPS_NO_IDMA		BIT(8)
#define LPSS_PRIV_CAPS_TYPE_MASK	GENMASK(7, 4)
#define LPSS_PRIV_CAPS_TYPE_SHIFT	4

/* This matches the type field in the CAPS register */
enum intel_lpss_dev_type {
	LPSS_DEV_I2C = 0,
	LPSS_DEV_UART,
	LPSS_DEV_SPI,
};

struct intel_lpss {
	const struct intel_lpss_platform_info *info;
	enum intel_lpss_dev_type type;
	struct clk *clk;
	struct clk_lookup *clock;
	struct mfd_cell *cell;
	struct device *dev;
	void __iomem *priv;
	u32 priv_ctx[LPSS_PRIV_REG_COUNT];
	int devid;
	u32 caps;
	u32 active_ltr;
	u32 idle_ltr;
	struct dentry *debugfs;
};

static const struct resource intel_lpss_dev_resources[] = {
	DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
	DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
	DEFINE_RES_IRQ(0),
};

static const struct resource intel_lpss_idma64_resources[] = {
	DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
	DEFINE_RES_IRQ(0),
};

/*
 * Cells need to be ordered so that the iDMA is created first. This is
 * because we need to be sure the DMA is available when the host controller
 * driver is probed.
 */
static const struct mfd_cell intel_lpss_idma64_cell = {
	.name = LPSS_IDMA64_DRIVER_NAME,
	.num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
	.resources = intel_lpss_idma64_resources,
};

static const struct mfd_cell intel_lpss_i2c_cell = {
	.name = "i2c_designware",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_uart_cell = {
	.name = "dw-apb-uart",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_spi_cell = {
	.name = "pxa2xx-spi",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static DEFINE_IDA(intel_lpss_devid_ida);
static struct dentry *intel_lpss_debugfs;

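/* Cache the current active and idle LTR register values in the driver data */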
static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
{
	lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
	lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
}

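/* Expose the capabilities and the cached LTR values under debugfs */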
static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
{
	struct dentry *dir;

	dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	/* Cache the values into lpss structure */
	intel_lpss_cache_ltr(lpss);

	debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
	debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
	debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);

	lpss->debugfs = dir;
	return 0;
}

static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
{
	debugfs_remove_recursive(lpss->debugfs);
}

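/* Callback backing the PM QoS latency tolerance interface of this device */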
static void intel_lpss_ltr_set(struct device *dev, s32 val)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	u32 ltr;

	/*
	 * Program the latency tolerance (LTR) according to what has been
	 * asked by the PM QoS layer, or disable it if we were passed a
	 * negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~LPSS_PRIV_LTR_REQ;
	} else {
		ltr |= LPSS_PRIV_LTR_REQ;
		ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
		ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;

		if (val > LPSS_PRIV_LTR_VALUE_MASK)
			ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
		else
			ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
	}

	if (ltr == lpss->active_ltr)
		return;

	writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
	writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);

	/* Cache the values into lpss structure */
	intel_lpss_cache_ltr(lpss);
}

static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
{
	lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
	dev_pm_qos_expose_latency_tolerance(lpss->dev);
}

static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
{
	dev_pm_qos_hide_latency_tolerance(lpss->dev);
	lpss->dev->power.set_latency_tolerance = NULL;
}

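/* Pick the MFD cell matching the type reported in the CAPS register */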
static int intel_lpss_assign_devs(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell;
	unsigned int type;

	type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
	type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;

	switch (type) {
	case LPSS_DEV_I2C:
		cell = &intel_lpss_i2c_cell;
		break;
	case LPSS_DEV_UART:
		cell = &intel_lpss_uart_cell;
		break;
	case LPSS_DEV_SPI:
		cell = &intel_lpss_spi_cell;
		break;
	default:
		return -ENODEV;
	}

	lpss->cell = devm_kmemdup(lpss->dev, cell, sizeof(*cell), GFP_KERNEL);
	if (!lpss->cell)
		return -ENOMEM;

	lpss->type = type;

	return 0;
}

static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
{
	return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
}

static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
{
	resource_size_t addr = lpss->info->mem->start;

	lo_hi_writeq(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR);
}

static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;

	/* Bring the device out of reset */
	writel(value, lpss->priv + LPSS_PRIV_RESETS);
}

static void intel_lpss_init_dev(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;

	/* Set the device in reset state */
	writel(0, lpss->priv + LPSS_PRIV_RESETS);

	intel_lpss_deassert_reset(lpss);

	intel_lpss_set_remap_addr(lpss);

	if (!intel_lpss_has_idma(lpss))
		return;

	/* Make sure that SPI multiblock DMA transfers are re-enabled */
	if (lpss->type == LPSS_DEV_SPI)
		writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
}

static void intel_lpss_unregister_clock_tree(struct clk *clk)
{
	struct clk *parent;

	while (clk) {
		parent = clk_get_parent(clk);
		clk_unregister(clk);
		clk = parent;
	}
}

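/*
 * The per-device clock is modelled as a gate (bit 0), an M/N fractional
 * divider (bits 1..30) and an update gate (bit 31), all located in the
 * first LPSS private register (offset 0x00 of lpss->priv).
 */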
static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
					     const char *devname,
					     struct clk **clk)
{
	char name[32];
	struct clk *tmp = *clk;
	int ret;

	snprintf(name, sizeof(name), "%s-enable", devname);
	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
				lpss->priv, 0, 0, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	snprintf(name, sizeof(name), "%s-div", devname);
	tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
					      0, lpss->priv, 1, 15, 16, 15,
					      CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
					      NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	*clk = tmp;

	if (lpss->info->quirks & QUIRK_CLOCK_DIVIDER_UNITY) {
		ret = clk_set_rate(tmp, lpss->info->clk_rate);
		if (ret)
			return ret;
	}

	snprintf(name, sizeof(name), "%s-update", devname);
	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
				CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	*clk = tmp;

	return 0;
}

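/*
 * Register the fixed-rate root clock and the per-device divider chain, then
 * bind the resulting clock to the host controller device via clkdev.
 */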
static int intel_lpss_register_clock(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell = lpss->cell;
	struct clk *clk;
	char devname[24];
	int ret;

	if (!lpss->info->clk_rate)
		return 0;

	/* Root clock */
	clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL, 0,
				      lpss->info->clk_rate);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);

	/*
	 * Support the clock divider only if it has some preset value.
	 * Otherwise we assume that the divider is not used.
	 */
	if (lpss->type != LPSS_DEV_I2C) {
		ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
		if (ret)
			goto err_clk_register;
	}

	ret = -ENOMEM;

	/* Clock for the host controller */
	lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
	if (!lpss->clock)
		goto err_clk_register;

	lpss->clk = clk;

	return 0;

err_clk_register:
	intel_lpss_unregister_clock_tree(clk);

	return ret;
}

static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
{
	if (IS_ERR_OR_NULL(lpss->clk))
		return;

	clkdev_drop(lpss->clock);
	intel_lpss_unregister_clock_tree(lpss->clk);
}

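/**
 * intel_lpss_probe - Probe an Intel LPSS host controller
 * @dev: Parent device provided by the PCI or ACPI glue driver
 * @info: Platform description (MMIO resource, IRQ, clock rate, quirks)
 *
 * Maps the private register space, registers the clock tree, exposes the
 * LTR interface and creates the iDMA64 and host controller MFD cells.
 *
 * Return: 0 on success or a negative error code on failure.
 */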
int intel_lpss_probe(struct device *dev,
		     const struct intel_lpss_platform_info *info)
{
	struct intel_lpss *lpss;
	int ret;

	if (!info || !info->mem)
		return -EINVAL;

	if (info->irq < 0)
		return info->irq;

	lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
	if (!lpss)
		return -ENOMEM;

	lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
				     LPSS_PRIV_SIZE);
	if (!lpss->priv)
		return -ENOMEM;

	lpss->info = info;
	lpss->dev = dev;
	lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);

	dev_set_drvdata(dev, lpss);

	ret = intel_lpss_assign_devs(lpss);
	if (ret)
		return ret;

	lpss->cell->swnode = info->swnode;
	lpss->cell->ignore_resource_conflicts = info->quirks & QUIRK_IGNORE_RESOURCE_CONFLICTS;

	intel_lpss_init_dev(lpss);

	lpss->devid = ida_alloc(&intel_lpss_devid_ida, GFP_KERNEL);
	if (lpss->devid < 0)
		return lpss->devid;

	ret = intel_lpss_register_clock(lpss);
	if (ret)
		goto err_clk_register;

	intel_lpss_ltr_expose(lpss);

	ret = intel_lpss_debugfs_add(lpss);
	if (ret)
		dev_warn(dev, "Failed to create debugfs entries\n");

	if (intel_lpss_has_idma(lpss)) {
		ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
				      1, info->mem, info->irq, NULL);
		if (ret)
			dev_warn(dev, "Failed to add %s, fallback to PIO\n",
				 LPSS_IDMA64_DRIVER_NAME);
	}

	ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
			      1, info->mem, info->irq, NULL);
	if (ret)
		goto err_remove_ltr;

	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);

	return 0;

err_remove_ltr:
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);

err_clk_register:
	ida_free(&intel_lpss_devid_ida, lpss->devid);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(intel_lpss_probe, "INTEL_LPSS");

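/**
 * intel_lpss_remove - Undo intel_lpss_probe()
 * @dev: Parent device passed to intel_lpss_probe()
 *
 * Removes the MFD cells and releases the resources acquired during probe.
 */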
void intel_lpss_remove(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);

	mfd_remove_devices(dev);
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);
	ida_free(&intel_lpss_devid_ida, lpss->devid);
}
EXPORT_SYMBOL_NS_GPL(intel_lpss_remove, "INTEL_LPSS");

static int resume_lpss_device(struct device *dev, void *data)
{
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
		pm_runtime_resume(dev);

	return 0;
}

static int intel_lpss_prepare(struct device *dev)
{
	/*
	 * Resume both child devices before entering system sleep. This
	 * ensures that they are in a proper state before they get suspended.
	 */
	device_for_each_child_reverse(dev, NULL, resume_lpss_device);
	return 0;
}

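/* Save the private register context; also used as the runtime suspend hook */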
static int intel_lpss_suspend(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	/* Save device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		lpss->priv_ctx[i] = readl(lpss->priv + i * 4);

	/*
	 * If the device type is not UART, then put the controller into
	 * reset. UART cannot be put into reset since S3/S0ix fail when
	 * no_console_suspend flag is enabled.
	 */
	if (lpss->type != LPSS_DEV_UART)
		writel(0, lpss->priv + LPSS_PRIV_RESETS);

	return 0;
}

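/* Restore the private register context; also used as the runtime resume hook */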
static int intel_lpss_resume(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	intel_lpss_deassert_reset(lpss);

	/* Restore device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		writel(lpss->priv_ctx[i], lpss->priv + i * 4);

	return 0;
}

EXPORT_NS_GPL_DEV_PM_OPS(intel_lpss_pm_ops, INTEL_LPSS) = {
	.prepare = pm_sleep_ptr(&intel_lpss_prepare),
	LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
	RUNTIME_PM_OPS(intel_lpss_suspend, intel_lpss_resume, NULL)
};

static int __init intel_lpss_init(void)
{
	intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
	return 0;
}
module_init(intel_lpss_init);

static void __exit intel_lpss_exit(void)
{
	ida_destroy(&intel_lpss_devid_ida);
	debugfs_remove(intel_lpss_debugfs);
}
module_exit(intel_lpss_exit);

MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_DESCRIPTION("Intel LPSS core driver");
MODULE_LICENSE("GPL v2");
/*
 * Ensure the DMA driver is loaded before the host controller device appears,
 * so that the host controller driver can request its DMA channels as early
 * as possible.
 *
 * If the DMA module is not there that's OK as well.
 */
MODULE_SOFTDEP("pre: platform:" LPSS_IDMA64_DRIVER_NAME);