// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Sunrisepoint LPSS core support.
 *
 * Copyright (C) 2015, Intel Corporation
 *
 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Heikki Krogerus <heikki.krogerus@linux.intel.com>
 *          Jarkko Nikula <jarkko.nikula@linux.intel.com>
 */

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/dma/idma64.h>

#include "intel-lpss.h"

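/*
 * Each LPSS device exposes a single MMIO window: the host controller
 * registers live at offset 0x000, the private LPSS registers at 0x200
 * and the integrated DMA engine (iDMA64) at 0x800, as described by the
 * offsets and sizes below.
 */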
#define LPSS_DEV_OFFSET		0x000
#define LPSS_DEV_SIZE		0x200
#define LPSS_PRIV_OFFSET	0x200
#define LPSS_PRIV_SIZE		0x100
#define LPSS_PRIV_REG_COUNT	(LPSS_PRIV_SIZE / 4)
#define LPSS_IDMA64_OFFSET	0x800
#define LPSS_IDMA64_SIZE	0x800

/* Offsets from lpss->priv */
#define LPSS_PRIV_RESETS		0x04
#define LPSS_PRIV_RESETS_IDMA		BIT(2)
#define LPSS_PRIV_RESETS_FUNC		0x3

#define LPSS_PRIV_ACTIVELTR		0x10
#define LPSS_PRIV_IDLELTR		0x14

#define LPSS_PRIV_LTR_REQ		BIT(15)
#define LPSS_PRIV_LTR_SCALE_MASK	GENMASK(11, 10)
#define LPSS_PRIV_LTR_SCALE_1US		(2 << 10)
#define LPSS_PRIV_LTR_SCALE_32US	(3 << 10)
#define LPSS_PRIV_LTR_VALUE_MASK	GENMASK(9, 0)

#define LPSS_PRIV_SSP_REG		0x20
#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN	BIT(0)

#define LPSS_PRIV_REMAP_ADDR		0x40

#define LPSS_PRIV_CAPS			0xfc
#define LPSS_PRIV_CAPS_NO_IDMA		BIT(8)
#define LPSS_PRIV_CAPS_TYPE_MASK	GENMASK(7, 4)
#define LPSS_PRIV_CAPS_TYPE_SHIFT	4

/* This matches the type field in the CAPS register */
enum intel_lpss_dev_type {
	LPSS_DEV_I2C = 0,
	LPSS_DEV_UART,
	LPSS_DEV_SPI,
};

struct intel_lpss {
	const struct intel_lpss_platform_info *info;
	enum intel_lpss_dev_type type;
	struct clk *clk;
	struct clk_lookup *clock;
	struct mfd_cell *cell;
	struct device *dev;
	void __iomem *priv;
	u32 priv_ctx[LPSS_PRIV_REG_COUNT];
	int devid;
	u32 caps;
	u32 active_ltr;
	u32 idle_ltr;
	struct dentry *debugfs;
};

static const struct resource intel_lpss_dev_resources[] = {
	DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
	DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
	DEFINE_RES_IRQ(0),
};

static const struct resource intel_lpss_idma64_resources[] = {
	DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
	DEFINE_RES_IRQ(0),
};

/*
 * Cells need to be ordered so that the iDMA is created first. This is
 * because we need to be sure the DMA is available when the host controller
 * driver is probed.
 */
static const struct mfd_cell intel_lpss_idma64_cell = {
	.name = LPSS_IDMA64_DRIVER_NAME,
	.num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
	.resources = intel_lpss_idma64_resources,
};

static const struct mfd_cell intel_lpss_i2c_cell = {
	.name = "i2c_designware",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_uart_cell = {
	.name = "dw-apb-uart",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_spi_cell = {
	.name = "pxa2xx-spi",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static DEFINE_IDA(intel_lpss_devid_ida);
static struct dentry *intel_lpss_debugfs;

static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
{
	lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
	lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
}

static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
{
	struct dentry *dir;

	dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	/* Cache the values into the lpss structure */
	intel_lpss_cache_ltr(lpss);

	debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
	debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
	debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);

	lpss->debugfs = dir;
	return 0;
}

static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
{
	debugfs_remove_recursive(lpss->debugfs);
}

static void intel_lpss_ltr_set(struct device *dev, s32 val)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	u32 ltr;

	/*
	 * Program the latency tolerance (LTR) according to what has been
	 * requested by the PM QoS layer, or disable it in case we were
	 * passed a negative value or PM_QOS_LATENCY_ANY.
	 */
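	/*
	 * Encoding example (the requested value is assumed to be in
	 * microseconds, as used by the PM QoS latency tolerance
	 * framework): a request of 500 fits in the 10-bit value field
	 * and is programmed as-is with the 1 us scale, whereas a request
	 * of 3000 does not fit and is programmed with the 32 us scale as
	 * 3000 >> 5 = 93, i.e. roughly 2976 us.
	 */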
	ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~LPSS_PRIV_LTR_REQ;
	} else {
		ltr |= LPSS_PRIV_LTR_REQ;
		ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
		ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;

		if (val > LPSS_PRIV_LTR_VALUE_MASK)
			ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
		else
			ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
	}

	if (ltr == lpss->active_ltr)
		return;

	writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
	writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);

	/* Cache the values into the lpss structure */
	intel_lpss_cache_ltr(lpss);
}

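/*
 * Expose the latency tolerance to the PM QoS framework: the
 * ->set_latency_tolerance() hook routes requests (including those made
 * from user space through PM QoS) down to intel_lpss_ltr_set().
 */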
static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
{
	lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
	dev_pm_qos_expose_latency_tolerance(lpss->dev);
}

static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
{
	dev_pm_qos_hide_latency_tolerance(lpss->dev);
	lpss->dev->power.set_latency_tolerance = NULL;
}

static int intel_lpss_assign_devs(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell;
	unsigned int type;

	type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
	type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;

	switch (type) {
	case LPSS_DEV_I2C:
		cell = &intel_lpss_i2c_cell;
		break;
	case LPSS_DEV_UART:
		cell = &intel_lpss_uart_cell;
		break;
	case LPSS_DEV_SPI:
		cell = &intel_lpss_spi_cell;
		break;
	default:
		return -ENODEV;
	}

	lpss->cell = devm_kmemdup(lpss->dev, cell, sizeof(*cell), GFP_KERNEL);
	if (!lpss->cell)
		return -ENOMEM;

	lpss->type = type;

	return 0;
}

static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
{
	return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
}

static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
{
	resource_size_t addr = lpss->info->mem->start;

	lo_hi_writeq(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR);
}

static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;

	/* Bring the device out of reset */
	writel(value, lpss->priv + LPSS_PRIV_RESETS);
}

static void intel_lpss_init_dev(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;

	/* Put the device into reset */
	writel(0, lpss->priv + LPSS_PRIV_RESETS);

	intel_lpss_deassert_reset(lpss);

	intel_lpss_set_remap_addr(lpss);

	if (!intel_lpss_has_idma(lpss))
		return;

	/* Make sure that SPI multiblock DMA transfers are re-enabled */
	if (lpss->type == LPSS_DEV_SPI)
		writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
}

static void intel_lpss_unregister_clock_tree(struct clk *clk)
{
	struct clk *parent;

	while (clk) {
		parent = clk_get_parent(clk);
		clk_unregister(clk);
		clk = parent;
	}
}

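/*
 * The private clock register at lpss->priv is carved up as encoded in the
 * registration calls below: bit 0 is a clock gate ("-enable"), bits 1..15
 * and 16..30 hold the numerator and denominator of the fractional divider
 * ("-div"), and bit 31 is a second gate ("-update") that latches a new
 * divider setting.
 */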
static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
					     const char *devname,
					     struct clk **clk)
{
	char name[32];
	struct clk *tmp = *clk;

	snprintf(name, sizeof(name), "%s-enable", devname);
	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
				lpss->priv, 0, 0, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	snprintf(name, sizeof(name), "%s-div", devname);
	tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
					      CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
					      lpss->priv, 1, 15, 16, 15, 0,
					      NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	*clk = tmp;

	snprintf(name, sizeof(name), "%s-update", devname);
	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
				CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	*clk = tmp;

	return 0;
}

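/*
 * Build the clock tree for the host controller: a fixed-rate root clock
 * (named after this device), optionally followed by the enable/divider/
 * update chain registered above (skipped for I2C), plus a clkdev lookup
 * so that the host controller driver can find its clock by the con_id
 * from the platform info.
 */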
static int intel_lpss_register_clock(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell = lpss->cell;
	struct clk *clk;
	char devname[24];
	int ret;

	if (!lpss->info->clk_rate)
		return 0;

	/* Root clock */
	clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL, 0,
				      lpss->info->clk_rate);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);

	/*
	 * Register the clock divider chain only for non-I2C devices; for
	 * I2C we assume that the divider is not used.
	 */
	if (lpss->type != LPSS_DEV_I2C) {
		ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
		if (ret)
			goto err_clk_register;
	}

	ret = -ENOMEM;

	/* Clock for the host controller */
	lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
	if (!lpss->clock)
		goto err_clk_register;

	lpss->clk = clk;

	return 0;

err_clk_register:
	intel_lpss_unregister_clock_tree(clk);

	return ret;
}

static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
{
	if (IS_ERR_OR_NULL(lpss->clk))
		return;

	clkdev_drop(lpss->clock);
	intel_lpss_unregister_clock_tree(lpss->clk);
}

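/**
 * intel_lpss_probe() - Set up a single Intel LPSS host controller
 * @dev: parent device supplied by the bus glue driver
 * @info: platform information describing the MMIO resource, the IRQ and,
 *        optionally, the input clock rate, clock con_id and software node
 *
 * Maps the private registers, determines the controller type from the
 * capabilities register, brings the controller out of reset, registers its
 * clock tree and adds the iDMA64 and host controller MFD cells.
 *
 * Return: 0 on success or a negative error code on failure.
 */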
int intel_lpss_probe(struct device *dev,
		     const struct intel_lpss_platform_info *info)
{
	struct intel_lpss *lpss;
	int ret;

	if (!info || !info->mem || info->irq <= 0)
		return -EINVAL;

	lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
	if (!lpss)
		return -ENOMEM;

	lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
				     LPSS_PRIV_SIZE);
	if (!lpss->priv)
		return -ENOMEM;

	lpss->info = info;
	lpss->dev = dev;
	lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);

	dev_set_drvdata(dev, lpss);

	ret = intel_lpss_assign_devs(lpss);
	if (ret)
		return ret;

	lpss->cell->swnode = info->swnode;
	lpss->cell->ignore_resource_conflicts = info->ignore_resource_conflicts;

	intel_lpss_init_dev(lpss);

	lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL);
	if (lpss->devid < 0)
		return lpss->devid;

	ret = intel_lpss_register_clock(lpss);
	if (ret)
		goto err_clk_register;

	intel_lpss_ltr_expose(lpss);

	ret = intel_lpss_debugfs_add(lpss);
	if (ret)
		dev_warn(dev, "Failed to create debugfs entries\n");

	if (intel_lpss_has_idma(lpss)) {
		ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
				      1, info->mem, info->irq, NULL);
		if (ret)
			dev_warn(dev, "Failed to add %s, fallback to PIO\n",
				 LPSS_IDMA64_DRIVER_NAME);
	}

	ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
			      1, info->mem, info->irq, NULL);
	if (ret)
		goto err_remove_ltr;

	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);

	return 0;

err_remove_ltr:
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);

err_clk_register:
	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_lpss_probe);
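
/*
 * Illustrative sketch only (not code from this driver): a bus glue driver
 * is expected to fill in struct intel_lpss_platform_info and call
 * intel_lpss_probe() from its own probe path, roughly like this (the
 * resource, IRQ and clock rate below are hypothetical):
 *
 *	struct intel_lpss_platform_info *info;
 *
 *	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
 *	if (!info)
 *		return -ENOMEM;
 *
 *	info->mem = mmio_resource;	// MMIO window of the controller
 *	info->irq = irq_number;		// its interrupt line
 *	info->clk_rate = 120000000;	// functional clock rate in Hz
 *
 *	return intel_lpss_probe(dev, info);
 */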

void intel_lpss_remove(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);

	mfd_remove_devices(dev);
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);
	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
}
EXPORT_SYMBOL_GPL(intel_lpss_remove);

static int resume_lpss_device(struct device *dev, void *data)
{
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
		pm_runtime_resume(dev);

	return 0;
}

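/*
 * System sleep helpers: the bus glue drivers are expected to wire
 * intel_lpss_prepare(), intel_lpss_suspend() and intel_lpss_resume()
 * into their dev_pm_ops.
 */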
int intel_lpss_prepare(struct device *dev)
{
	/*
	 * Resume the child devices before entering system sleep. This
	 * ensures that they are in a proper state before they get
	 * suspended.
	 */
	device_for_each_child_reverse(dev, NULL, resume_lpss_device);
	return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_prepare);

int intel_lpss_suspend(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	/* Save device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		lpss->priv_ctx[i] = readl(lpss->priv + i * 4);

	/*
	 * If the device type is not UART, then put the controller into
	 * reset. UART cannot be put into reset since S3/S0ix fail when
	 * the no_console_suspend flag is enabled.
	 */
	if (lpss->type != LPSS_DEV_UART)
		writel(0, lpss->priv + LPSS_PRIV_RESETS);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_suspend);

int intel_lpss_resume(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	intel_lpss_deassert_reset(lpss);

	/* Restore device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		writel(lpss->priv_ctx[i], lpss->priv + i * 4);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_resume);

static int __init intel_lpss_init(void)
{
	intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
	return 0;
}
module_init(intel_lpss_init);

static void __exit intel_lpss_exit(void)
{
	ida_destroy(&intel_lpss_devid_ida);
	debugfs_remove(intel_lpss_debugfs);
}
module_exit(intel_lpss_exit);

MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_DESCRIPTION("Intel LPSS core driver");
MODULE_LICENSE("GPL v2");
/*
 * Ensure the DMA driver is loaded before the host controller device appears,
 * so that the host controller driver can request its DMA channels as early
 * as possible.
 *
 * If the DMA module is not there, that's OK as well.
 */
MODULE_SOFTDEP("pre: platform:" LPSS_IDMA64_DRIVER_NAME);
546