/*
 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cavium Inc.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include "cavium.h"

static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	down(&host->mmc_serializer);
}

static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
	up(&host->mmc_serializer);
}

static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}

static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
					   struct pci_dev *pdev)
{
	int nvec, ret, i;

	nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* register interrupts */
	for (i = 0; i < nvec; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       cvm_mmc_interrupt,
				       0, cvm_mmc_irq_names[i], host);
		if (ret)
			return ret;
	}
	return 0;
}

static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base) {
		ret = -EINVAL;
		goto error;
	}

	/* On ThunderX these are identical */
	host->dma_base = host->base;

	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto error;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto error;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret) {
				of_node_put(child_node);
				goto error;
			}
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i]) {
			get_device(&host->slot_pdev[i]->dev);
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
			put_device(&host->slot_pdev[i]->dev);
		}
	}
	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
	return ret;
}

static void thunder_mmc_remove(struct pci_dev *pdev)
{
	struct cvm_mmc_host *host = pci_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
}

static const struct pci_device_id thunder_mmc_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
	{ 0, }  /* end of table */
};

static struct pci_driver thunder_mmc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunder_mmc_id_table,
	.probe = thunder_mmc_probe,
	.remove = thunder_mmc_remove,
};

module_pci_driver(thunder_mmc_driver);

MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);