// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Sunrisepoint LPSS core support.
 *
 * Copyright (C) 2015, Intel Corporation
 *
 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Heikki Krogerus <heikki.krogerus@linux.intel.com>
 *          Jarkko Nikula <jarkko.nikula@linux.intel.com>
 */

#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sprintf.h>
#include <linux/types.h>

#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/dma/idma64.h>

#include "intel-lpss.h"

struct dentry;

#define LPSS_DEV_OFFSET		0x000
#define LPSS_DEV_SIZE		0x200
#define LPSS_PRIV_OFFSET	0x200
#define LPSS_PRIV_SIZE		0x100
#define LPSS_PRIV_REG_COUNT	(LPSS_PRIV_SIZE / 4)
#define LPSS_IDMA64_OFFSET	0x800
#define LPSS_IDMA64_SIZE	0x800

/* Offsets from lpss->priv */
#define LPSS_PRIV_RESETS		0x04
#define LPSS_PRIV_RESETS_IDMA		BIT(2)
#define LPSS_PRIV_RESETS_FUNC		0x3

#define LPSS_PRIV_ACTIVELTR		0x10
#define LPSS_PRIV_IDLELTR		0x14

#define LPSS_PRIV_LTR_REQ		BIT(15)
#define LPSS_PRIV_LTR_SCALE_MASK	GENMASK(11, 10)
#define LPSS_PRIV_LTR_SCALE_1US		(2 << 10)
#define LPSS_PRIV_LTR_SCALE_32US	(3 << 10)
#define LPSS_PRIV_LTR_VALUE_MASK	GENMASK(9, 0)

#define LPSS_PRIV_SSP_REG		0x20
#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN	BIT(0)

#define LPSS_PRIV_REMAP_ADDR		0x40

#define LPSS_PRIV_CAPS			0xfc
#define LPSS_PRIV_CAPS_NO_IDMA		BIT(8)
#define LPSS_PRIV_CAPS_TYPE_MASK	GENMASK(7, 4)
#define LPSS_PRIV_CAPS_TYPE_SHIFT	4

/* This matches the type field in CAPS register */
enum intel_lpss_dev_type {
	LPSS_DEV_I2C = 0,
	LPSS_DEV_UART,
	LPSS_DEV_SPI,
};

struct intel_lpss {
	const struct intel_lpss_platform_info *info;
	enum intel_lpss_dev_type type;
	struct clk *clk;
	struct clk_lookup *clock;
	struct mfd_cell *cell;
	struct device *dev;
	void __iomem *priv;
	u32 priv_ctx[LPSS_PRIV_REG_COUNT];
	int devid;
	u32 caps;
	u32 active_ltr;
	u32 idle_ltr;
	struct dentry *debugfs;
};

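/*
 * Resource templates for the MFD children. The memory offsets are relative
 * to the parent device's MMIO window and the IRQ index refers to the
 * parent's interrupt; both get translated against the info->mem and
 * info->irq values that the glue drivers pass to intel_lpss_probe().
 */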
static const struct resource intel_lpss_dev_resources[] = {
	DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
	DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
	DEFINE_RES_IRQ(0),
};

static const struct resource intel_lpss_idma64_resources[] = {
	DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
	DEFINE_RES_IRQ(0),
};

/*
 * Cells need to be ordered so that the iDMA is created first. This is
 * because we need to be sure the DMA is available when the host controller
 * driver is probed.
 */
static const struct mfd_cell intel_lpss_idma64_cell = {
	.name = LPSS_IDMA64_DRIVER_NAME,
	.num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
	.resources = intel_lpss_idma64_resources,
};

static const struct mfd_cell intel_lpss_i2c_cell = {
	.name = "i2c_designware",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_uart_cell = {
	.name = "dw-apb-uart",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static const struct mfd_cell intel_lpss_spi_cell = {
	.name = "pxa2xx-spi",
	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
	.resources = intel_lpss_dev_resources,
};

static DEFINE_IDA(intel_lpss_devid_ida);
static struct dentry *intel_lpss_debugfs;

static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
{
	lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
	lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
}

static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
{
	struct dentry *dir;

	dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	/* Cache the values into the lpss structure */
	intel_lpss_cache_ltr(lpss);

	debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
	debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
	debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);

	lpss->debugfs = dir;
	return 0;
}

static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
{
	debugfs_remove_recursive(lpss->debugfs);
}

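/*
 * The LTR registers written below follow the layout of the LPSS_PRIV_LTR_*
 * definitions above: bit 15 enables the requirement, bits 11:10 select the
 * scale (1 us or 32 us) and bits 9:0 hold the latency value. A request that
 * does not fit in ten bits at 1 us granularity is converted to the 32 us
 * scale, hence the ">> 5" in intel_lpss_ltr_set().
 */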
static void intel_lpss_ltr_set(struct device *dev, s32 val)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	u32 ltr;

	/*
	 * Program the latency tolerance (LTR) according to what has been
	 * requested by the PM QoS layer, or disable it in case we were
	 * passed a negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~LPSS_PRIV_LTR_REQ;
	} else {
		ltr |= LPSS_PRIV_LTR_REQ;
		ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
		ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;

		if (val > LPSS_PRIV_LTR_VALUE_MASK)
			ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
		else
			ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
	}

	if (ltr == lpss->active_ltr)
		return;

	writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
	writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);

	/* Cache the values into the lpss structure */
	intel_lpss_cache_ltr(lpss);
}

static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
{
	lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
	dev_pm_qos_expose_latency_tolerance(lpss->dev);
}

static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
{
	dev_pm_qos_hide_latency_tolerance(lpss->dev);
	lpss->dev->power.set_latency_tolerance = NULL;
}

static int intel_lpss_assign_devs(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell;
	unsigned int type;

	type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
	type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;

	switch (type) {
	case LPSS_DEV_I2C:
		cell = &intel_lpss_i2c_cell;
		break;
	case LPSS_DEV_UART:
		cell = &intel_lpss_uart_cell;
		break;
	case LPSS_DEV_SPI:
		cell = &intel_lpss_spi_cell;
		break;
	default:
		return -ENODEV;
	}

	lpss->cell = devm_kmemdup(lpss->dev, cell, sizeof(*cell), GFP_KERNEL);
	if (!lpss->cell)
		return -ENOMEM;

	lpss->type = type;

	return 0;
}

static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
{
	return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
}

static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
{
	resource_size_t addr = lpss->info->mem->start;

	lo_hi_writeq(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR);
}

static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;

	/* Bring the device out of reset */
	writel(value, lpss->priv + LPSS_PRIV_RESETS);
}

static void intel_lpss_init_dev(const struct intel_lpss *lpss)
{
	u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;

	/* Set the device in reset state */
	writel(0, lpss->priv + LPSS_PRIV_RESETS);

	intel_lpss_deassert_reset(lpss);

	intel_lpss_set_remap_addr(lpss);

	if (!intel_lpss_has_idma(lpss))
		return;

	/* Make sure that SPI multiblock DMA transfers are re-enabled */
	if (lpss->type == LPSS_DEV_SPI)
		writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
}

static void intel_lpss_unregister_clock_tree(struct clk *clk)
{
	struct clk *parent;

	while (clk) {
		parent = clk_get_parent(clk);
		clk_unregister(clk);
		clk = parent;
	}
}

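/*
 * The "<dev>-enable" -> "<dev>-div" -> "<dev>-update" chain registered
 * below appears to live in a single 32-bit register in the private space:
 * bit 0 gates the clock, bits 15:1 and 30:16 hold the fractional divider's
 * numerator and denominator, and bit 31 latches newly written divider
 * values.
 */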
static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
					     const char *devname,
					     struct clk **clk)
{
	char name[32];
	struct clk *tmp = *clk;

	snprintf(name, sizeof(name), "%s-enable", devname);
	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
				lpss->priv, 0, 0, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	snprintf(name, sizeof(name), "%s-div", devname);
	tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
					      0, lpss->priv, 1, 15, 16, 15,
					      CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
					      NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	*clk = tmp;

	snprintf(name, sizeof(name), "%s-update", devname);
	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
				CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	*clk = tmp;

	return 0;
}

static int intel_lpss_register_clock(struct intel_lpss *lpss)
{
	const struct mfd_cell *cell = lpss->cell;
	struct clk *clk;
	char devname[24];
	int ret;

	if (!lpss->info->clk_rate)
		return 0;

	/* Root clock */
	clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL, 0,
				      lpss->info->clk_rate);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);

	/*
	 * Register the clock divider chain only for device types that use
	 * it; for I2C the divider is not used and the host controller runs
	 * directly from the root clock.
	 */
	if (lpss->type != LPSS_DEV_I2C) {
		ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
		if (ret)
			goto err_clk_register;
	}

	ret = -ENOMEM;

	/* Clock for the host controller */
	lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
	if (!lpss->clock)
		goto err_clk_register;

	lpss->clk = clk;

	return 0;

err_clk_register:
	intel_lpss_unregister_clock_tree(clk);

	return ret;
}

static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
{
	if (IS_ERR_OR_NULL(lpss->clk))
		return;

	clkdev_drop(lpss->clock);
	intel_lpss_unregister_clock_tree(lpss->clk);
}

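/*
 * Common probe path used by the bus-specific glue drivers. Note that the
 * iDMA cell is added before the host controller cell (see the cell
 * ordering comment above) so that DMA is available when the host
 * controller driver probes; if adding the DMA cell fails, the warning
 * below is emitted and the host controller falls back to PIO.
 */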
int intel_lpss_probe(struct device *dev,
		     const struct intel_lpss_platform_info *info)
{
	struct intel_lpss *lpss;
	int ret;

	if (!info || !info->mem)
		return -EINVAL;

	if (info->irq < 0)
		return info->irq;

	lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
	if (!lpss)
		return -ENOMEM;

	lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
				     LPSS_PRIV_SIZE);
	if (!lpss->priv)
		return -ENOMEM;

	lpss->info = info;
	lpss->dev = dev;
	lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);

	dev_set_drvdata(dev, lpss);

	ret = intel_lpss_assign_devs(lpss);
	if (ret)
		return ret;

	lpss->cell->swnode = info->swnode;
	lpss->cell->ignore_resource_conflicts = info->ignore_resource_conflicts;

	intel_lpss_init_dev(lpss);

	lpss->devid = ida_alloc(&intel_lpss_devid_ida, GFP_KERNEL);
	if (lpss->devid < 0)
		return lpss->devid;

	ret = intel_lpss_register_clock(lpss);
	if (ret)
		goto err_clk_register;

	intel_lpss_ltr_expose(lpss);

	ret = intel_lpss_debugfs_add(lpss);
	if (ret)
		dev_warn(dev, "Failed to create debugfs entries\n");

	if (intel_lpss_has_idma(lpss)) {
		ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
				      1, info->mem, info->irq, NULL);
		if (ret)
			dev_warn(dev, "Failed to add %s, fallback to PIO\n",
				 LPSS_IDMA64_DRIVER_NAME);
	}

	ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
			      1, info->mem, info->irq, NULL);
	if (ret)
		goto err_remove_ltr;

	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);

	return 0;

err_remove_ltr:
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);

err_clk_register:
	ida_free(&intel_lpss_devid_ida, lpss->devid);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(intel_lpss_probe, INTEL_LPSS);

void intel_lpss_remove(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);

	mfd_remove_devices(dev);
	intel_lpss_debugfs_remove(lpss);
	intel_lpss_ltr_hide(lpss);
	intel_lpss_unregister_clock(lpss);
	ida_free(&intel_lpss_devid_ida, lpss->devid);
}
EXPORT_SYMBOL_NS_GPL(intel_lpss_remove, INTEL_LPSS);

static int resume_lpss_device(struct device *dev, void *data)
{
	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
		pm_runtime_resume(dev);

	return 0;
}

static int intel_lpss_prepare(struct device *dev)
{
	/*
	 * Resume both child devices before entering system sleep. This
	 * ensures that they are in proper state before they get suspended.
	 */
	device_for_each_child_reverse(dev, NULL, resume_lpss_device);
	return 0;
}

static int intel_lpss_suspend(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	/* Save device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		lpss->priv_ctx[i] = readl(lpss->priv + i * 4);

	/*
	 * If the device type is not UART, then put the controller into
	 * reset. UART cannot be put into reset since S3/S0ix fail when
	 * no_console_suspend flag is enabled.
	 */
	if (lpss->type != LPSS_DEV_UART)
		writel(0, lpss->priv + LPSS_PRIV_RESETS);

	return 0;
}

static int intel_lpss_resume(struct device *dev)
{
	struct intel_lpss *lpss = dev_get_drvdata(dev);
	unsigned int i;

	intel_lpss_deassert_reset(lpss);

	/* Restore device context */
	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
		writel(lpss->priv_ctx[i], lpss->priv + i * 4);

	return 0;
}

EXPORT_NS_GPL_DEV_PM_OPS(intel_lpss_pm_ops, INTEL_LPSS) = {
	.prepare = pm_sleep_ptr(&intel_lpss_prepare),
	LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
	RUNTIME_PM_OPS(intel_lpss_suspend, intel_lpss_resume, NULL)
};

static int __init intel_lpss_init(void)
{
	intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
	return 0;
}
module_init(intel_lpss_init);

static void __exit intel_lpss_exit(void)
{
	ida_destroy(&intel_lpss_devid_ida);
	debugfs_remove(intel_lpss_debugfs);
}
module_exit(intel_lpss_exit);

MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_DESCRIPTION("Intel LPSS core driver");
MODULE_LICENSE("GPL v2");
/*
 * Ensure the DMA driver is loaded before the host controller device appears,
 * so that the host controller driver can request its DMA channels as early
 * as possible.
 *
 * If the DMA module is not there that's OK as well.
 */
MODULE_SOFTDEP("pre: platform:" LPSS_IDMA64_DRIVER_NAME);