/*
 * Driver for MMC and SSD cards for Cavium OCTEON SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/octeon/octeon.h>
#include "cavium.h"

/* Bootbus control register; on CN70XX writing 0 switches the MMC controller onto the bus. */
#define CVMX_MIO_BOOT_CTL CVMX_ADD_IO_SEG(0x00011800000000D0ull)

/*
 * The l2c* functions below are used for the EMMC-17978 workaround.
 *
 * Due to a bug in the design of the MMC bus hardware, the 2nd to last
 * cache block of a DMA read must be locked into the L2 Cache.
 * Otherwise, data corruption may occur.
 */

/* Convert a physical address to a virtual pointer in the 64-bit direct-mapped segment. */
static inline void *phys_to_ptr(u64 address)
{
	return (void *)(address | (1ull << 63)); /* XKPHYS */
}

/*
 * Lock a single line into L2. The line is zeroed before locking
 * to make sure no dram accesses are made.
 *
 * NOTE(review): the zeroing described above is not visible in this
 * function — only the fetch-and-lock cache op is issued. Confirm
 * against the EMMC-17978 errata description.
 */
static void l2c_lock_line(u64 addr)
{
	char *addr_ptr = phys_to_ptr(addr);

	asm volatile (
		"cache 31, %[line]"	/* Lock the line */
		::[line] "m" (*addr_ptr));
}

/* Unlock a single line in the L2 cache. */
static void l2c_unlock_line(u64 addr)
{
	char *addr_ptr = phys_to_ptr(addr);

	asm volatile (
		"cache 23, %[line]"	/* Unlock the line */
		::[line] "m" (*addr_ptr));
}

/* Locks a memory region in the L2 cache, one cache line at a time. */
static void l2c_lock_mem_region(u64 start, u64 len)
{
	u64 end;

	/* Round start/end to cache line boundaries */
	end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
	start = ALIGN(start, CVMX_CACHE_LINE_SIZE);

	while (start <= end) {
		l2c_lock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
	}
	asm volatile("sync");	/* MIPS barrier: complete the cache ops before returning */
}

/* Unlock a memory region in the L2 cache. */
static void l2c_unlock_mem_region(u64 start, u64 len)
{
	u64 end;

	/* Round start/end to cache line boundaries */
	end = ALIGN(start + len - 1, CVMX_CACHE_LINE_SIZE);
	start = ALIGN(start, CVMX_CACHE_LINE_SIZE);

	while (start <= end) {
		l2c_unlock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
	}
}

/*
 * Acquire exclusive access to the controller. Non-CIU3 chips share the
 * bootbus with other devices, so take the global bootbus semaphore;
 * CIU3 chips only need the per-host serializer.
 */
static void octeon_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	if (!host->has_ciu3) {
		down(&octeon_bootbus_sem);
		/* For CN70XX, switch the MMC controller onto the bus. */
		if (OCTEON_IS_MODEL(OCTEON_CN70XX))
			writeq(0, (void __iomem *)CVMX_MIO_BOOT_CTL);
	} else {
		down(&host->mmc_serializer);
	}
}

/* Release whichever semaphore octeon_mmc_acquire_bus() took. */
static void octeon_mmc_release_bus(struct cvm_mmc_host *host)
{
	if (!host->has_ciu3)
		up(&octeon_bootbus_sem);
	else
		up(&host->mmc_serializer);
}

/*
 * Write @val to MIO_EMM_INT and, on non-CIU3 hardware, mirror it into
 * MIO_EMM_INT_EN as the interrupt-enable mask.
 */
static void octeon_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	if (!host->has_ciu3)
		writeq(val, host->base + MIO_EMM_INT_EN(host));
}

/*
 * Refcounted control of the shared power GPIO: dir == 1 adds a user and
 * drives the GPIO high on the first one; dir == 0 drops a user and
 * drives it low when the last one goes away.
 */
static void octeon_mmc_set_shared_power(struct cvm_mmc_host *host, int dir)
{
	if (dir == 0)
		if (!atomic_dec_return(&host->shared_power_users))
			gpiod_set_value_cansleep(host->global_pwr_gpiod, 0);
	if (dir == 1)
		if (atomic_inc_return(&host->shared_power_users) == 1)
			gpiod_set_value_cansleep(host->global_pwr_gpiod, 1);
}

/*
 * EMMC-17978 workaround (see the l2c* comment above): before a
 * multi-block write DMA larger than 1024 bytes, lock the 512-byte
 * region that starts 1024 bytes before the end of the buffer into L2.
 * The locked base address is remembered in host->n_minus_one so
 * octeon_mmc_dmar_fixup_done() can undo it.
 */
static void octeon_mmc_dmar_fixup(struct cvm_mmc_host *host,
				  struct mmc_command *cmd,
				  struct mmc_data *data,
				  u64 addr)
{
	if (cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
		return;
	if (data->blksz * data->blocks <= 1024)
		return;

	host->n_minus_one = addr + (data->blksz * data->blocks) - 1024;
	l2c_lock_mem_region(host->n_minus_one, 512);
}

/* Undo octeon_mmc_dmar_fixup(): unlock the region, if one was locked. */
static void octeon_mmc_dmar_fixup_done(struct cvm_mmc_host *host)
{
	if (!host->n_minus_one)
		return;
	l2c_unlock_mem_region(host->n_minus_one, 512);
	host->n_minus_one = 0;
}

/*
 * Probe: set up the host callbacks, map the register windows, request
 * the model-appropriate IRQs, and create one child platform device plus
 * MMC slot per device-tree child node. Returns 0 or a negative errno.
 */
static int octeon_mmc_probe(struct platform_device *pdev)
{
	struct device_node *cn, *node = pdev->dev.of_node;
	struct cvm_mmc_host *host;
	void __iomem *base;
	int mmc_irq[9];
	int i, ret = 0;
	u64 val;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = &pdev->dev;
	host->acquire_bus = octeon_mmc_acquire_bus;
	host->release_bus = octeon_mmc_release_bus;
	host->int_enable = octeon_mmc_int_enable;
	host->set_shared_power = octeon_mmc_set_shared_power;
	/* The EMMC-17978 DMA workaround is only hooked up on these models. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
	    OCTEON_IS_MODEL(OCTEON_CNF7XXX)) {
		host->dmar_fixup = octeon_mmc_dmar_fixup;
		host->dmar_fixup_done = octeon_mmc_dmar_fixup_done;
	}

	host->sys_freq = octeon_get_io_clock_rate();

	if (of_device_is_compatible(node, "cavium,octeon-7890-mmc")) {
		host->big_dma_addr = true;
		host->need_irq_handler_lock = true;
		host->has_ciu3 = true;
		host->use_sg = true;
		/*
		 * First seven are the EMM_INT bits 0..6, then two for
		 * the EMM_DMA_INT bits
		 */
		for (i = 0; i < 9; i++) {
			mmc_irq[i] = platform_get_irq(pdev, i);
			if (mmc_irq[i] < 0)
				return mmc_irq[i];

			/* work around legacy u-boot device trees */
			irq_set_irq_type(mmc_irq[i], IRQ_TYPE_EDGE_RISING);
		}
	} else {
		host->big_dma_addr = false;
		host->need_irq_handler_lock = false;
		host->has_ciu3 = false;
		/* First one is EMM second DMA */
		for (i = 0; i < 2; i++) {
			mmc_irq[i] = platform_get_irq(pdev, i);
			if (mmc_irq[i] < 0)
				return mmc_irq[i];
		}
	}

	host->last_slot = -1;

	/* Resource 0: the EMM register block. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
	host->base = base;
	host->reg_off = 0;

	/* Resource 1: the DMA register block. */
	base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(base))
		return PTR_ERR(base);
	host->dma_base = base;
	/*
	 * To keep the register addresses shared we intentionally use
	 * a negative offset here, first register used on Octeon therefore
	 * starts at 0x20 (MIO_EMM_DMA_CFG).
	 */
	host->reg_off_dma = -0x20;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader (writing the pending bits back acknowledges them).
	 */
	val = readq(host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT(host));

	if (host->has_ciu3) {
		/* Only CMD_DONE, DMA_DONE, CMD_ERR, DMA_ERR */
		for (i = 1; i <= 4; i++) {
			ret = devm_request_irq(&pdev->dev, mmc_irq[i],
					       cvm_mmc_interrupt,
					       0, cvm_mmc_irq_names[i], host);
			if (ret < 0) {
				dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
					mmc_irq[i]);
				return ret;
			}
		}
	} else {
		ret = devm_request_irq(&pdev->dev, mmc_irq[0],
				       cvm_mmc_interrupt, 0, KBUILD_MODNAME,
				       host);
		if (ret < 0) {
			dev_err(&pdev->dev, "Error: devm_request_irq %d\n",
				mmc_irq[0]);
			return ret;
		}
	}

	/* Optional shared power rail; absent GPIO yields a NULL descriptor. */
	host->global_pwr_gpiod = devm_gpiod_get_optional(&pdev->dev,
							 "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(host->global_pwr_gpiod)) {
		dev_err(&pdev->dev, "Invalid power GPIO\n");
		return PTR_ERR(host->global_pwr_gpiod);
	}

	platform_set_drvdata(pdev, host);

	i = 0;
	for_each_child_of_node(node, cn) {
		host->slot_pdev[i] =
			of_platform_device_create(cn, NULL, &pdev->dev);
		if (!host->slot_pdev[i]) {
			/* Keep slot indices aligned with DT children. */
			i++;
			continue;
		}
		ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
		if (ret) {
			dev_err(&pdev->dev, "Error populating slots\n");
			octeon_mmc_set_shared_power(host, 0);
			/* Drop the iterator's reference before bailing out. */
			of_node_put(cn);
			goto error;
		}
		i++;
	}
	return 0;

error:
	/* Unwind any slots and child devices created above. */
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i])
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
	}
	return ret;
}

/* Remove: tear down all slots, disable the DMA engine, drop our power-rail user. */
static void octeon_mmc_remove(struct platform_device *pdev)
{
	struct cvm_mmc_host *host = platform_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

	/* Clear the enable bit in MIO_EMM_DMA_CFG. */
	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	octeon_mmc_set_shared_power(host, 0);
}

static const struct of_device_id octeon_mmc_match[] = {
	{
		.compatible = "cavium,octeon-6130-mmc",
	},
	{
		.compatible = "cavium,octeon-7890-mmc",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mmc_match);

static struct platform_driver octeon_mmc_driver = {
	.probe = octeon_mmc_probe,
	.remove_new = octeon_mmc_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = octeon_mmc_match,
	},
};

module_platform_driver(octeon_mmc_driver);

MODULE_AUTHOR("Cavium Inc. <support@cavium.com>");
MODULE_DESCRIPTION("Low-level driver for Cavium OCTEON MMC/SSD card");
MODULE_LICENSE("GPL");