/*
 * Qualcomm External Bus Interface 2 (EBI2) driver
 * an older version of the Qualcomm Parallel Interface Controller (QPIC)
 *
 * Copyright (C) 2016 Linaro Ltd.
 *
 * Author: Linus Walleij <linus.walleij@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 * See the device tree bindings for this block for more details on the
 * hardware.
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>

/*
 * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
 */
#define EBI2_CS0_ENABLE_MASK	(BIT(0) | BIT(1))
#define EBI2_CS1_ENABLE_MASK	(BIT(2) | BIT(3))
#define EBI2_CS2_ENABLE_MASK	BIT(4)
#define EBI2_CS3_ENABLE_MASK	BIT(5)
#define EBI2_CS4_ENABLE_MASK	(BIT(6) | BIT(7))
#define EBI2_CS5_ENABLE_MASK	(BIT(8) | BIT(9))
#define EBI2_CSN_MASK		GENMASK(9, 0)

#define EBI2_XMEM_CFG		0x0000 /* Power management etc */

/*
 * SLOW CSn CFG
 *
 * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc); this is the time
 *             the memory continues to drive the data bus after OE is
 *             de-asserted. Inserted when reading one CS and switching to
 *             another CS, or read followed by write on the same CS. Valid
 *             values 0 thru 15.
 * Bits 27-24: WR_HOLD write hold cycles; these are extra cycles inserted
 *             after every write, minimum 1. The data out is driven from the
 *             time WE is asserted until CS is asserted. With a hold of 1,
 *             the CS stays active for 1 extra cycle etc. Valid values
 *             0 thru 15.
 * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the
 *             first write to a page or burst memory.
 * Bits 15-8:  RD_DELTA initial latency for read cycles inserted for the
 *             first read to a page or burst memory.
 * Bits 7-4:   WR_WAIT number of wait cycles for every write access,
 *             0 = 1 cycle, so 1 thru 16 cycles.
 * Bits 3-0:   RD_WAIT number of wait cycles for every read access,
 *             0 = 1 cycle, so 1 thru 16 cycles.
 */
#define EBI2_XMEM_CS0_SLOW_CFG	0x0008
#define EBI2_XMEM_CS1_SLOW_CFG	0x000C
#define EBI2_XMEM_CS2_SLOW_CFG	0x0010
#define EBI2_XMEM_CS3_SLOW_CFG	0x0014
#define EBI2_XMEM_CS4_SLOW_CFG	0x0018
#define EBI2_XMEM_CS5_SLOW_CFG	0x001C

#define EBI2_XMEM_RECOVERY_SHIFT	28
#define EBI2_XMEM_WR_HOLD_SHIFT		24
#define EBI2_XMEM_WR_DELTA_SHIFT	16
#define EBI2_XMEM_RD_DELTA_SHIFT	8
#define EBI2_XMEM_WR_WAIT_SHIFT		4
#define EBI2_XMEM_RD_WAIT_SHIFT		0
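
/*
 * Illustrative example (hypothetical values, not from any datasheet):
 * a child node specifying qcom,xmem-recovery-cycles = <2>,
 * qcom,xmem-write-hold-cycles = <3> and qcom,xmem-read-wait-cycles = <5>
 * would make the driver assemble a SLOW CFG word of
 * (2 << 28) | (3 << 24) | (5 << 0) = 0x23000005 for that chipselect.
 */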

/*
 * FAST CSn CFG
 *
 * Bits 31-28: ?
 * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
 *             transfer. For a single read transfer this will be the time
 *             from CS assertion to OE assertion.
 * Bits 23-18: ?
 * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
 *             assertion, with respect to the cycle where ADV is asserted.
 *             2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
 * Bit 5:      ADDR_HOLD_ENA, the address is held for an extra cycle to meet
 *             hold time requirements with ADV assertion.
 *
 * The manual mentions "write precharge cycles" and "precharge cycles".
 * We have not been able to figure out which bit fields these correspond to
 * in the hardware, or what valid values exist. The current hypothesis is that
 * this is something just used on the FAST chip selects. There is also a "byte
 * device enable" flag somewhere for 8bit memories.
 */
#define EBI2_XMEM_CS0_FAST_CFG	0x0028
#define EBI2_XMEM_CS1_FAST_CFG	0x002C
#define EBI2_XMEM_CS2_FAST_CFG	0x0030
#define EBI2_XMEM_CS3_FAST_CFG	0x0034
#define EBI2_XMEM_CS4_FAST_CFG	0x0038
#define EBI2_XMEM_CS5_FAST_CFG	0x003C

#define EBI2_XMEM_RD_HOLD_SHIFT		24
#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT	16
#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT	5

/**
 * struct cs_data - struct with info on a chipselect setting
 * @enable_mask: mask to enable the chipselect in the EBI2 config
 * @slow_cfg: offset to XMEM slow CS config
 * @fast_cfg: offset to XMEM fast CS config
 */
struct cs_data {
	u32 enable_mask;
	u16 slow_cfg;
	u16 fast_cfg;
};

static const struct cs_data cs_info[] = {
	{
		/* CS0 */
		.enable_mask = EBI2_CS0_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
	},
	{
		/* CS1 */
		.enable_mask = EBI2_CS1_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
	},
	{
		/* CS2 */
		.enable_mask = EBI2_CS2_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
	},
	{
		/* CS3 */
		.enable_mask = EBI2_CS3_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
	},
	{
		/* CS4 */
		.enable_mask = EBI2_CS4_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
	},
	{
		/* CS5 */
		.enable_mask = EBI2_CS5_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
	},
};

/**
 * struct ebi2_xmem_prop - describes an XMEM config property
 * @prop: the device tree binding name
 * @max: maximum value for the property
 * @slowreg: true if this property is in the SLOW CS config register,
 *           else it is assumed to be in the FAST config register
 * @shift: the bit field start in the SLOW or FAST register for this
 *         property
 */
struct ebi2_xmem_prop {
	const char *prop;
	u32 max;
	bool slowreg;
	u16 shift;
};

static const struct ebi2_xmem_prop xmem_props[] = {
	{
		.prop = "qcom,xmem-recovery-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-hold-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_HOLD_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-address-hold-enable",
		.max = 1, /* boolean prop */
		.slowreg = false,
		.shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
	},
	{
		.prop = "qcom,xmem-adv-to-oe-recovery-cycles",
		.max = 3,
		.slowreg = false,
		.shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-hold-cycles",
		.max = 15,
		.slowreg = false,
		.shift = EBI2_XMEM_RD_HOLD_SHIFT,
	},
};
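
/*
 * A hypothetical child node using the properties above might look like
 * this (the node name, chipselect index and timing values are invented
 * for illustration; the device tree bindings document has the
 * authoritative example). Note that all of these properties, including
 * the address hold enable, are read as u32 values by this driver:
 *
 *	sram@2,0 {
 *		reg = <2 0x0 0x100000>;
 *		qcom,xmem-recovery-cycles = <2>;
 *		qcom,xmem-write-hold-cycles = <3>;
 *		qcom,xmem-read-wait-cycles = <5>;
 *		qcom,xmem-address-hold-enable = <1>;
 *	};
 */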

static void qcom_ebi2_setup_chipselect(struct device_node *np,
				       struct device *dev,
				       void __iomem *ebi2_base,
				       void __iomem *ebi2_xmem,
				       u32 csindex)
{
	const struct cs_data *csd;
	u32 slowcfg, fastcfg;
	u32 val;
	int ret;
	int i;

	csd = &cs_info[csindex];
	val = readl(ebi2_base);
	val |= csd->enable_mask;
	writel(val, ebi2_base);
	dev_dbg(dev, "enabled CS%u\n", csindex);

	/* Next set up the XMEMC */
	slowcfg = 0;
	fastcfg = 0;

	for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
		const struct ebi2_xmem_prop *xp = &xmem_props[i];

		/* All are regular u32 values */
		ret = of_property_read_u32(np, xp->prop, &val);
		if (ret) {
			dev_dbg(dev, "could not read %s for CS%u\n",
				xp->prop, csindex);
			continue;
		}

		/* First check boolean props */
		if (xp->max == 1 && val) {
			if (xp->slowreg)
				slowcfg |= BIT(xp->shift);
			else
				fastcfg |= BIT(xp->shift);
			dev_dbg(dev, "set %s flag\n", xp->prop);
			continue;
		}

		/* We're dealing with a u32 */
		if (val > xp->max) {
			dev_err(dev,
				"too high value for %s: %u, capped at %u\n",
				xp->prop, val, xp->max);
			val = xp->max;
		}
		if (xp->slowreg)
			slowcfg |= (val << xp->shift);
		else
			fastcfg |= (val << xp->shift);
		dev_dbg(dev, "set %s to %u\n", xp->prop, val);
	}

	dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
		 csindex, slowcfg, fastcfg);

	if (slowcfg)
		writel(slowcfg, ebi2_xmem + csd->slow_cfg);
	if (fastcfg)
		writel(fastcfg, ebi2_xmem + csd->fast_cfg);
}
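
/*
 * For orientation, qcom_ebi2_probe() below expects: two register regions
 * (index 0 is the chipselect enable/config register, index 1 the XMEM
 * timing registers), two clocks with consumer names "ebi2x" and "ebi2",
 * and child nodes whose first "reg" cell is the chipselect index (0-5).
 * A minimal sketch, with placeholder addresses and clock specifiers:
 *
 *	external-bus@1a100000 {
 *		compatible = "qcom,msm8660-ebi2";
 *		reg = <0x1a100000 0x1000>, <0x1a110000 0x1000>;
 *		clocks = <&gcc 123>, <&gcc 124>;
 *		clock-names = "ebi2x", "ebi2";
 *	};
 */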

static int qcom_ebi2_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *child;
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *ebi2_base;
	void __iomem *ebi2_xmem;
	struct clk *ebi2xclk;
	struct clk *ebi2clk;
	bool have_children = false;
	u32 val;
	int ret;

	ebi2xclk = devm_clk_get(dev, "ebi2x");
	if (IS_ERR(ebi2xclk))
		return PTR_ERR(ebi2xclk);

	ret = clk_prepare_enable(ebi2xclk);
	if (ret) {
		dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
		return ret;
	}

	ebi2clk = devm_clk_get(dev, "ebi2");
	if (IS_ERR(ebi2clk)) {
		ret = PTR_ERR(ebi2clk);
		goto err_disable_2x_clk;
	}

	ret = clk_prepare_enable(ebi2clk);
	if (ret) {
		dev_err(dev, "could not enable EBI2 clk\n");
		goto err_disable_2x_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ebi2_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ebi2_base)) {
		ret = PTR_ERR(ebi2_base);
		goto err_disable_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ebi2_xmem = devm_ioremap_resource(dev, res);
	if (IS_ERR(ebi2_xmem)) {
		ret = PTR_ERR(ebi2_xmem);
		goto err_disable_clk;
	}

	/* Allegedly this turns the power save mode off */
	writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);

	/* Disable all chipselects */
	val = readl(ebi2_base);
	val &= ~EBI2_CSN_MASK;
	writel(val, ebi2_base);

	/* Walk over the child nodes and see what chipselects we use */
	for_each_available_child_of_node(np, child) {
		u32 csindex;

		/* Figure out the chipselect */
		ret = of_property_read_u32(child, "reg", &csindex);
		if (ret) {
			of_node_put(child);
			goto err_disable_clk;
		}

		if (csindex > 5) {
			dev_err(dev,
				"invalid chipselect %u, we only support 0-5\n",
				csindex);
			continue;
		}

		qcom_ebi2_setup_chipselect(child,
					   dev,
					   ebi2_base,
					   ebi2_xmem,
					   csindex);

		/* We have at least one child */
		have_children = true;
	}

	if (have_children)
		return of_platform_default_populate(np, NULL, dev);
	return 0;

err_disable_clk:
	clk_disable_unprepare(ebi2clk);
err_disable_2x_clk:
	clk_disable_unprepare(ebi2xclk);

	return ret;
}

static const struct of_device_id qcom_ebi2_of_match[] = {
	{ .compatible = "qcom,msm8660-ebi2", },
	{ .compatible = "qcom,apq8060-ebi2", },
	{ }
};

static struct platform_driver qcom_ebi2_driver = {
	.probe = qcom_ebi2_probe,
	.driver = {
		.name = "qcom-ebi2",
		.of_match_table = qcom_ebi2_of_match,
	},
};
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
MODULE_LICENSE("GPL");