// SPDX-License-Identifier: GPL-2.0
// PCI1xxxx SPI driver
// Copyright (C) 2022 Microchip Technology Inc.
// Authors: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
//          Kumaravel Thiagarajan <Kumaravel.Thiagarajan@microchip.com>

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci_regs.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/spi/spi.h>

#define DRV_NAME "spi-pci1xxxx"

#define SYS_FREQ_DEFAULT		(62500000)

#define PCI1XXXX_SPI_MAX_CLOCK_HZ	(30000000)
#define PCI1XXXX_SPI_CLK_20MHZ		(20000000)
#define PCI1XXXX_SPI_CLK_15MHZ		(15000000)
#define PCI1XXXX_SPI_CLK_12MHZ		(12000000)
#define PCI1XXXX_SPI_CLK_10MHZ		(10000000)
#define PCI1XXXX_SPI_MIN_CLOCK_HZ	(2000000)

#define PCI1XXXX_SPI_BUFFER_SIZE	(320)

#define SPI_MST_CTL_DEVSEL_MASK		(GENMASK(27, 25))
#define SPI_MST_CTL_CMD_LEN_MASK	(GENMASK(16, 8))
#define SPI_MST_CTL_SPEED_MASK		(GENMASK(7, 5))
#define SPI_MSI_VECTOR_SEL_MASK		(GENMASK(4, 4))

#define SPI_MST_CTL_FORCE_CE		(BIT(4))
#define SPI_MST_CTL_MODE_SEL		(BIT(2))
#define SPI_MST_CTL_GO			(BIT(0))

#define SPI_PERI_ADDR_BASE		(0x160000)
#define SPI_SYSTEM_ADDR_BASE		(0x2000)
#define SPI_MST1_ADDR_BASE		(0x800)

#define DEV_REV_REG			(SPI_SYSTEM_ADDR_BASE + 0x00)
#define SPI_SYSLOCK_REG			(SPI_SYSTEM_ADDR_BASE + 0xA0)
#define SPI_CONFIG_PERI_ENABLE_REG	(SPI_SYSTEM_ADDR_BASE + 0x108)

#define SPI_PERI_ENBLE_PF_MASK		(GENMASK(17, 16))
#define DEV_REV_MASK			(GENMASK(7, 0))

#define SPI_SYSLOCK			BIT(4)
#define SPI0				(0)
#define SPI1				(1)

/* DMA Related Registers */
#define SPI_DMA_ADDR_BASE		(0x1000)
#define SPI_DMA_GLOBAL_WR_ENGINE_EN	(SPI_DMA_ADDR_BASE + 0x0C)
#define SPI_DMA_WR_DOORBELL_REG		(SPI_DMA_ADDR_BASE + 0x10)
#define SPI_DMA_GLOBAL_RD_ENGINE_EN	(SPI_DMA_ADDR_BASE + 0x2C)
#define SPI_DMA_RD_DOORBELL_REG		(SPI_DMA_ADDR_BASE + 0x30)
#define SPI_DMA_INTR_WR_STS		(SPI_DMA_ADDR_BASE + 0x4C)
#define SPI_DMA_WR_INT_MASK		(SPI_DMA_ADDR_BASE + 0x54)
#define SPI_DMA_INTR_WR_CLR		(SPI_DMA_ADDR_BASE + 0x58)
#define SPI_DMA_ERR_WR_STS		(SPI_DMA_ADDR_BASE + 0x5C)
#define SPI_DMA_INTR_IMWR_WDONE_LOW	(SPI_DMA_ADDR_BASE + 0x60)
#define SPI_DMA_INTR_IMWR_WDONE_HIGH	(SPI_DMA_ADDR_BASE + 0x64)
#define SPI_DMA_INTR_IMWR_WABORT_LOW	(SPI_DMA_ADDR_BASE + 0x68)
#define SPI_DMA_INTR_IMWR_WABORT_HIGH	(SPI_DMA_ADDR_BASE + 0x6C)
#define SPI_DMA_INTR_WR_IMWR_DATA	(SPI_DMA_ADDR_BASE + 0x70)
#define SPI_DMA_INTR_RD_STS		(SPI_DMA_ADDR_BASE + 0xA0)
#define SPI_DMA_RD_INT_MASK		(SPI_DMA_ADDR_BASE + 0xA8)
#define SPI_DMA_INTR_RD_CLR		(SPI_DMA_ADDR_BASE + 0xAC)
#define SPI_DMA_ERR_RD_STS		(SPI_DMA_ADDR_BASE + 0xB8)
#define SPI_DMA_INTR_IMWR_RDONE_LOW	(SPI_DMA_ADDR_BASE + 0xCC)
#define SPI_DMA_INTR_IMWR_RDONE_HIGH	(SPI_DMA_ADDR_BASE + 0xD0)
#define SPI_DMA_INTR_IMWR_RABORT_LOW	(SPI_DMA_ADDR_BASE + 0xD4)
#define SPI_DMA_INTR_IMWR_RABORT_HIGH	(SPI_DMA_ADDR_BASE + 0xD8)
#define SPI_DMA_INTR_RD_IMWR_DATA	(SPI_DMA_ADDR_BASE + 0xDC)

#define SPI_DMA_CH0_WR_BASE		(SPI_DMA_ADDR_BASE + 0x200)
#define SPI_DMA_CH0_RD_BASE		(SPI_DMA_ADDR_BASE + 0x300)
#define SPI_DMA_CH1_WR_BASE		(SPI_DMA_ADDR_BASE + 0x400)
#define SPI_DMA_CH1_RD_BASE		(SPI_DMA_ADDR_BASE + 0x500)

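/*
 * Layout of each DMA channel register block (same for the write and
 * read directions, per the CHx_WR/CHx_RD bases above): a control
 * register, the transfer length, then the 64-bit source and
 * destination addresses at the offsets below.
 */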
#define SPI_DMA_CH_CTL1_OFFSET		(0x00)
#define SPI_DMA_CH_XFER_LEN_OFFSET	(0x08)
#define SPI_DMA_CH_SAR_LO_OFFSET	(0x0C)
#define SPI_DMA_CH_SAR_HI_OFFSET	(0x10)
#define SPI_DMA_CH_DAR_LO_OFFSET	(0x14)
#define SPI_DMA_CH_DAR_HI_OFFSET	(0x18)

#define SPI_DMA_CH0_DONE_INT		BIT(0)
#define SPI_DMA_CH1_DONE_INT		BIT(1)
#define SPI_DMA_CH0_ABORT_INT		BIT(16)
#define SPI_DMA_CH1_ABORT_INT		BIT(17)
#define SPI_DMA_DONE_INT_MASK		(SPI_DMA_CH0_DONE_INT | SPI_DMA_CH1_DONE_INT)
#define SPI_DMA_ABORT_INT_MASK		(SPI_DMA_CH0_ABORT_INT | SPI_DMA_CH1_ABORT_INT)
#define DMA_CH_CONTROL_LIE		BIT(3)
#define DMA_CH_CONTROL_RIE		BIT(4)
#define DMA_INTR_EN			(DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE)

/* x refers to SPI Host Controller HW instance id in the below macros - 0 or 1 */
#define SPI_MST_CMD_BUF_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x00)
#define SPI_MST_RSP_BUF_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x200)
#define SPI_MST_CTL_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x400)
#define SPI_MST_EVENT_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x420)
#define SPI_MST_EVENT_MASK_REG_OFFSET(x)	(((x) * SPI_MST1_ADDR_BASE) + 0x424)
#define SPI_MST_PAD_CTL_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x460)
#define SPIALERT_MST_DB_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x464)
#define SPIALERT_MST_VAL_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x468)
#define SPI_PCI_CTRL_REG_OFFSET(x)		(((x) * SPI_MST1_ADDR_BASE) + 0x480)

#define PCI1XXXX_IRQ_FLAGS		(IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE)
#define SPI_MAX_DATA_LEN		320

#define PCI1XXXX_SPI_TIMEOUT		(msecs_to_jiffies(100))
#define SYSLOCK_RETRY_CNT		(1000)
#define SPI_DMA_ENGINE_EN		(0x1)
#define SPI_DMA_ENGINE_DIS		(0x0)

#define SPI_INTR			BIT(8)
#define SPI_FORCE_CE			BIT(4)

#define SPI_CHIP_SEL_COUNT		7
#define VENDOR_ID_MCHP			0x1055

#define SPI_SUSPEND_CONFIG		0x101
#define SPI_RESUME_CONFIG		0x203

struct pci1xxxx_spi_internal {
	u8 hw_inst;
	u8 clkdiv;
	int irq;
	int mode;
	bool spi_xfer_in_progress;
	void *rx_buf;
	bool dma_aborted_rd;
	u32 bytes_recvd;
	u32 tx_sgl_len;
	u32 rx_sgl_len;
	struct scatterlist *tx_sgl, *rx_sgl;
	bool dma_aborted_wr;
	struct completion spi_xfer_done;
	struct spi_controller *spi_host;
	struct pci1xxxx_spi *parent;
	struct spi_transfer *xfer;
	struct {
		unsigned int dev_sel : 3;
		unsigned int msi_vector_sel : 1;
	} prev_val;
};

struct pci1xxxx_spi {
	struct pci_dev *dev;
	u8 total_hw_instances;
	u8 dev_rev;
	void __iomem *reg_base;
	void __iomem *dma_offset_bar;
	/* lock to safely access the DMA registers in isr */
	spinlock_t dma_reg_lock;
	bool can_dma;
	struct pci1xxxx_spi_internal *spi_int[] __counted_by(total_hw_instances);
};

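/*
 * driver_data encodes the instance layout consumed by probe: the low
 * nibble is the number of SPI host instances behind the function and
 * the high nibble is the first hardware instance id, so 0x02 means two
 * instances starting at instance 0 and 0x11 means only the second
 * instance.
 */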
static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0001), 0, 0, 0x02 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0002), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, 0x0003), 0, 0, 0x11 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa004, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0001), 0, 0, 0x02 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0002), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, 0x0003), 0, 0, 0x11 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa014, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0001), 0, 0, 0x02 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0002), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, 0x0003), 0, 0, 0x11 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa024, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0001), 0, 0, 0x02 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0002), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, 0x0003), 0, 0, 0x11 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa034, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0001), 0, 0, 0x02 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0002), 0, 0, 0x01 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, 0x0003), 0, 0, 0x11 },
	{ PCI_DEVICE_SUB(VENDOR_ID_MCHP, 0xa044, PCI_ANY_ID, PCI_ANY_ID), 0, 0, 0x01 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);

static int pci1xxxx_set_sys_lock(struct pci1xxxx_spi *par)
{
	writel(SPI_SYSLOCK, par->reg_base + SPI_SYSLOCK_REG);
	return readl(par->reg_base + SPI_SYSLOCK_REG);
}

static int pci1xxxx_acquire_sys_lock(struct pci1xxxx_spi *par)
{
	u32 regval;

	return readx_poll_timeout(pci1xxxx_set_sys_lock, par, regval,
				  (regval & SPI_SYSLOCK), 100,
				  SYSLOCK_RETRY_CNT * 100);
}

static void pci1xxxx_release_sys_lock(struct pci1xxxx_spi *par)
{
	writel(0x0, par->reg_base + SPI_SYSLOCK_REG);
}

static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
{
	struct pci_dev *pdev = spi_bus->dev;
	u32 pf_num;
	u32 regval;
	int ret;

	/*
	 * DEV_REV is a system register; the HW syslock must be acquired
	 * before accessing it.
	 */
	ret = pci1xxxx_acquire_sys_lock(spi_bus);
	if (ret) {
		dev_err(&pdev->dev, "Failed to acquire syslock\n");
		return ret;
	}

	regval = readl(spi_bus->reg_base + DEV_REV_REG);
	spi_bus->dev_rev = regval & DEV_REV_MASK;
	if (spi_bus->dev_rev >= 0xC0) {
		regval = readl(spi_bus->reg_base +
			       SPI_CONFIG_PERI_ENABLE_REG);
		pf_num = regval & SPI_PERI_ENBLE_PF_MASK;
	}

	pci1xxxx_release_sys_lock(spi_bus);

	/*
	 * DMA is supported only from revision C0 onwards, and SPI can
	 * use DMA only if it is mapped to PF0.
	 */
	if (spi_bus->dev_rev < 0xC0 || pf_num)
		return -EOPNOTSUPP;

	/*
	 * DMA is supported only with MSI interrupts: one SPI instance's
	 * MSI vector address and data are reused for the DMA interrupt.
	 */
	if (!irq_get_msi_desc(irq)) {
		dev_warn(&pdev->dev, "MSI interrupt not available, will operate in PIO mode\n");
		return -EOPNOTSUPP;
	}

	spi_bus->dma_offset_bar = pcim_iomap(pdev, 2, pci_resource_len(pdev, 2));
	if (!spi_bus->dma_offset_bar) {
		dev_warn(&pdev->dev, "Failed to map DMA BAR, will operate in PIO mode\n");
		return -EOPNOTSUPP;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_warn(&pdev->dev, "Failed to set DMA mask, will operate in PIO mode\n");
		pcim_iounmap(pdev, spi_bus->dma_offset_bar);
		spi_bus->dma_offset_bar = NULL;
		return -EOPNOTSUPP;
	}

	return 0;
}

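/*
 * Enable both DMA engines and mirror the cached MSI vector's
 * address/data into the IMWR (inbound memory write) registers, so DMA
 * done/abort events are raised through the same MSI vector as the SPI
 * instance.
 */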
static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq)
{
	struct msi_msg msi;
	int ret;

	ret = pci1xxxx_check_spi_can_dma(spi_bus, irq);
	if (ret)
		return ret;

	spin_lock_init(&spi_bus->dma_reg_lock);
	get_cached_msi_msg(irq, &msi);
	writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
	writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_HIGH);
	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_HIGH);
	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_HIGH);
	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_HIGH);
	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_LOW);
	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_LOW);
	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_LOW);
	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW);
	writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
	writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
	dma_set_max_seg_size(&spi_bus->dev->dev, PCI1XXXX_SPI_BUFFER_SIZE);
	spi_bus->can_dma = true;
	return 0;
}

static void pci1xxxx_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi->controller);
	struct pci1xxxx_spi *par = p->parent;
	u32 regval;

	/* Set the DEV_SEL bits of the SPI_MST_CTL_REG */
	regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
	if (!enable) {
		regval |= SPI_FORCE_CE;
		regval &= ~SPI_MST_CTL_DEVSEL_MASK;
		regval |= (spi_get_chipselect(spi, 0) << 25);
	} else {
		regval &= ~SPI_FORCE_CE;
	}
	writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
}

static u8 pci1xxxx_get_clock_div(u32 hz)
{
	u8 val = 0;

	if (hz >= PCI1XXXX_SPI_MAX_CLOCK_HZ)
		val = 2;
	else if ((hz < PCI1XXXX_SPI_MAX_CLOCK_HZ) && (hz >= PCI1XXXX_SPI_CLK_20MHZ))
		val = 3;
	else if ((hz < PCI1XXXX_SPI_CLK_20MHZ) && (hz >= PCI1XXXX_SPI_CLK_15MHZ))
		val = 4;
	else if ((hz < PCI1XXXX_SPI_CLK_15MHZ) && (hz >= PCI1XXXX_SPI_CLK_12MHZ))
		val = 5;
	else if ((hz < PCI1XXXX_SPI_CLK_12MHZ) && (hz >= PCI1XXXX_SPI_CLK_10MHZ))
		val = 6;
	else if ((hz < PCI1XXXX_SPI_CLK_10MHZ) && (hz >= PCI1XXXX_SPI_MIN_CLOCK_HZ))
		val = 7;
	else
		val = 2;

	return val;
}

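/*
 * The next two helpers program one direction of a transfer on the
 * instance's DMA channel: "to IO" uses the read engine to move the TX
 * payload from host memory into the SPI command buffer, and "from IO"
 * uses the write engine to move the response buffer contents back into
 * host memory.
 */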
static void pci1xxxx_spi_setup_dma_to_io(struct pci1xxxx_spi_internal *p,
					 dma_addr_t dma_addr, u32 len)
{
	void __iomem *base;

	if (!p->hw_inst)
		base = p->parent->dma_offset_bar + SPI_DMA_CH0_RD_BASE;
	else
		base = p->parent->dma_offset_bar + SPI_DMA_CH1_RD_BASE;

	writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
	writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
	writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_SAR_LO_OFFSET);
	writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_SAR_HI_OFFSET);
	/* Destination: the instance's SPI command buffer */
	writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
	       base + SPI_DMA_CH_DAR_LO_OFFSET);
	writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_CMD_BUF_OFFSET(p->hw_inst)),
	       base + SPI_DMA_CH_DAR_HI_OFFSET);
}

static void pci1xxxx_spi_setup_dma_from_io(struct pci1xxxx_spi_internal *p,
					   dma_addr_t dma_addr, u32 len)
{
	void __iomem *base;

	if (!p->hw_inst)
		base = p->parent->dma_offset_bar + SPI_DMA_CH0_WR_BASE;
	else
		base = p->parent->dma_offset_bar + SPI_DMA_CH1_WR_BASE;

	writel(DMA_INTR_EN, base + SPI_DMA_CH_CTL1_OFFSET);
	writel(len, base + SPI_DMA_CH_XFER_LEN_OFFSET);
	writel(lower_32_bits(dma_addr), base + SPI_DMA_CH_DAR_LO_OFFSET);
	writel(upper_32_bits(dma_addr), base + SPI_DMA_CH_DAR_HI_OFFSET);
	/* Source: the instance's SPI response buffer */
	writel(lower_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
	       base + SPI_DMA_CH_SAR_LO_OFFSET);
	writel(upper_32_bits(SPI_PERI_ADDR_BASE + SPI_MST_RSP_BUF_OFFSET(p->hw_inst)),
	       base + SPI_DMA_CH_SAR_HI_OFFSET);
}

static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode,
			       u8 clkdiv, u32 len)
{
	u32 regval;

	regval = readl(par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
	regval &= ~(SPI_MST_CTL_MODE_SEL | SPI_MST_CTL_CMD_LEN_MASK |
		    SPI_MST_CTL_SPEED_MASK);

	if (mode == SPI_MODE_3)
		regval |= SPI_MST_CTL_MODE_SEL;

	regval |= FIELD_PREP(SPI_MST_CTL_CMD_LEN_MASK, len);
	regval |= FIELD_PREP(SPI_MST_CTL_SPEED_MASK, clkdiv);
	writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
}

static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p, u8 hw_inst)
{
	u32 regval;

	regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
	regval |= SPI_MST_CTL_GO;
	writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
}

static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
					 struct spi_device *spi, struct spi_transfer *xfer)
{
	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
	struct pci1xxxx_spi *par = p->parent;
	int len, loop_iter, transfer_len;
	unsigned long bytes_transferred;
	unsigned long bytes_recvd;
	unsigned long loop_count;
	unsigned long result;
	const u8 *tx_buf;
	u8 *rx_buf;
	u32 regval;
	u8 clkdiv;

	p->spi_xfer_in_progress = true;
	p->bytes_recvd = 0;
	clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
	tx_buf = xfer->tx_buf;
	rx_buf = xfer->rx_buf;
	transfer_len = xfer->len;
	regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
	writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));

	if (tx_buf) {
		bytes_transferred = 0;
		bytes_recvd = 0;
		loop_count = transfer_len / SPI_MAX_DATA_LEN;
		if (transfer_len % SPI_MAX_DATA_LEN != 0)
			loop_count += 1;

		for (loop_iter = 0; loop_iter < loop_count; loop_iter++) {
			len = SPI_MAX_DATA_LEN;
			if ((transfer_len % SPI_MAX_DATA_LEN != 0) &&
			    (loop_iter == loop_count - 1))
				len = transfer_len % SPI_MAX_DATA_LEN;

			reinit_completion(&p->spi_xfer_done);
			memcpy_toio(par->reg_base + SPI_MST_CMD_BUF_OFFSET(p->hw_inst),
				    &tx_buf[bytes_transferred], len);
			bytes_transferred += len;
			pci1xxxx_spi_setup(par, p->hw_inst, spi->mode, clkdiv, len);
			pci1xxxx_start_spi_xfer(p, p->hw_inst);

			/* Wait for the SPI transfer completion interrupt */
			result = wait_for_completion_timeout(&p->spi_xfer_done,
							     PCI1XXXX_SPI_TIMEOUT);
			if (!result) {
				p->spi_xfer_in_progress = false;
				return -ETIMEDOUT;
			}

			if (rx_buf) {
				memcpy_fromio(&rx_buf[bytes_recvd], par->reg_base +
					      SPI_MST_RSP_BUF_OFFSET(p->hw_inst), len);
				bytes_recvd += len;
			}
		}
	}
	p->spi_xfer_in_progress = false;

	return 0;
}

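/*
 * DMA transfer flow, one scatterlist segment at a time: program the
 * read channel with the TX segment and ring the read doorbell; the
 * read-done interrupt sets the GO bit, the SPI completion rings the
 * write doorbell to drain the response buffer, and the write-done
 * interrupt queues the next segment or completes spi_xfer_done.
 */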
static int pci1xxxx_spi_transfer_with_dma(struct spi_controller *spi_ctlr,
					  struct spi_device *spi,
					  struct spi_transfer *xfer)
{
	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(spi_ctlr);
	struct pci1xxxx_spi *par = p->parent;
	dma_addr_t rx_dma_addr = 0;
	dma_addr_t tx_dma_addr = 0;
	int ret = 0;
	u32 regval;

	p->spi_xfer_in_progress = true;
	p->tx_sgl = xfer->tx_sg.sgl;
	p->rx_sgl = xfer->rx_sg.sgl;
	p->rx_buf = xfer->rx_buf;

	if (!xfer->tx_buf || !p->tx_sgl) {
		ret = -EINVAL;
		goto error;
	}
	p->xfer = xfer;
	p->mode = spi->mode;
	p->clkdiv = pci1xxxx_get_clock_div(xfer->speed_hz);
	p->bytes_recvd = 0;

	/* Clear any stale SPI events */
	regval = readl(par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
	writel(regval, par->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));

	tx_dma_addr = sg_dma_address(p->tx_sgl);
	rx_dma_addr = sg_dma_address(p->rx_sgl);
	p->tx_sgl_len = sg_dma_len(p->tx_sgl);
	p->rx_sgl_len = sg_dma_len(p->rx_sgl);
	pci1xxxx_spi_setup(par, p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
	pci1xxxx_spi_setup_dma_to_io(p, tx_dma_addr, p->tx_sgl_len);
	if (rx_dma_addr)
		pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len);
	writel(p->hw_inst, par->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);

	reinit_completion(&p->spi_xfer_done);
	/* Wait for the write DMA completion that ends the transfer */
	ret = wait_for_completion_timeout(&p->spi_xfer_done, PCI1XXXX_SPI_TIMEOUT);
	if (!ret) {
		ret = -ETIMEDOUT;
		if (p->dma_aborted_rd) {
			writel(SPI_DMA_ENGINE_DIS,
			       par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
			/*
			 * The DMA engine reset takes time if any TLP
			 * completion is in progress; wait until the
			 * engine reset completes.
			 */
			ret = readl_poll_timeout(par->dma_offset_bar +
						 SPI_DMA_GLOBAL_RD_ENGINE_EN, regval,
						 (regval == 0x0), 0, USEC_PER_MSEC);
			if (ret) {
				ret = -ECANCELED;
				goto error;
			}
			writel(SPI_DMA_ENGINE_EN,
			       par->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
			p->dma_aborted_rd = false;
			ret = -ECANCELED;
		}
		if (p->dma_aborted_wr) {
			writel(SPI_DMA_ENGINE_DIS,
			       par->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);

			/*
			 * The DMA engine reset takes time if any TLP
			 * completion is in progress; wait until the
			 * engine reset completes.
			 */
			ret = readl_poll_timeout(par->dma_offset_bar +
						 SPI_DMA_GLOBAL_WR_ENGINE_EN, regval,
						 (regval == 0x0), 0, USEC_PER_MSEC);
			if (ret) {
				ret = -ECANCELED;
				goto error;
			}

			writel(SPI_DMA_ENGINE_EN,
			       par->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
			p->dma_aborted_wr = false;
			ret = -ECANCELED;
		}
		goto error;
	}
	ret = 0;

error:
	p->spi_xfer_in_progress = false;

	return ret;
}

static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
				     struct spi_device *spi, struct spi_transfer *xfer)
{
	if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped)
		return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer);
	else
		return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer);
}

static irqreturn_t pci1xxxx_spi_isr_io(int irq, void *dev)
{
	struct pci1xxxx_spi_internal *p = dev;
	irqreturn_t spi_int_fired = IRQ_NONE;
	u32 regval;

	/* Handle and clear the SPI GO_BIT interrupt */
	regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
	if (regval & SPI_INTR) {
		/* Drain the response via DMA, else signal completion */
		if (p->parent->can_dma && p->rx_buf)
			writel(p->hw_inst, p->parent->dma_offset_bar +
			       SPI_DMA_WR_DOORBELL_REG);
		else
			complete(&p->parent->spi_int[p->hw_inst]->spi_xfer_done);
		spi_int_fired = IRQ_HANDLED;
	}
	writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
	return spi_int_fired;
}

static void pci1xxxx_spi_setup_next_dma_transfer(struct pci1xxxx_spi_internal *p)
{
	dma_addr_t tx_dma_addr = 0;
	dma_addr_t rx_dma_addr = 0;
	u32 prev_len;

	p->tx_sgl = sg_next(p->tx_sgl);
	if (p->rx_sgl)
		p->rx_sgl = sg_next(p->rx_sgl);
	if (!p->tx_sgl) {
		/* Signal completion of the whole transfer */
		complete(&p->spi_xfer_done);
	} else {
		tx_dma_addr = sg_dma_address(p->tx_sgl);
		prev_len = p->tx_sgl_len;
		p->tx_sgl_len = sg_dma_len(p->tx_sgl);
		if (prev_len != p->tx_sgl_len)
			pci1xxxx_spi_setup(p->parent,
					   p->hw_inst, p->mode, p->clkdiv, p->tx_sgl_len);
		pci1xxxx_spi_setup_dma_to_io(p, tx_dma_addr, p->tx_sgl_len);
		if (p->rx_sgl) {
			rx_dma_addr = sg_dma_address(p->rx_sgl);
			p->rx_sgl_len = sg_dma_len(p->rx_sgl);
			pci1xxxx_spi_setup_dma_from_io(p, rx_dma_addr, p->rx_sgl_len);
		}
		writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_RD_DOORBELL_REG);
	}
}

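/*
 * Both hardware instances share the DMA interrupt status/clear
 * registers, so they are inspected and acknowledged under
 * dma_reg_lock. Read-channel completion starts the SPI transfer for
 * the corresponding instance; write-channel completion advances that
 * instance to its next scatterlist segment.
 */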
static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
{
	struct pci1xxxx_spi_internal *p = dev;
	irqreturn_t spi_int_fired = IRQ_NONE;
	unsigned long flags;
	u32 regval;

	spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
	/* Clear the DMA RD INT and start the SPI transfer */
	regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_RD_STS);
	if (regval & SPI_DMA_DONE_INT_MASK) {
		if (regval & SPI_DMA_CH0_DONE_INT)
			pci1xxxx_start_spi_xfer(p, SPI0);
		if (regval & SPI_DMA_CH1_DONE_INT)
			pci1xxxx_start_spi_xfer(p, SPI1);
		spi_int_fired = IRQ_HANDLED;
	}
	if (regval & SPI_DMA_ABORT_INT_MASK) {
		p->dma_aborted_rd = true;
		spi_int_fired = IRQ_HANDLED;
	}
	writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);

	/* Clear the DMA WR INT */
	regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_WR_STS);
	if (regval & SPI_DMA_DONE_INT_MASK) {
		if (regval & SPI_DMA_CH0_DONE_INT)
			pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI0]);

		if (regval & SPI_DMA_CH1_DONE_INT)
			pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI1]);

		spi_int_fired = IRQ_HANDLED;
	}
	if (regval & SPI_DMA_ABORT_INT_MASK) {
		p->dma_aborted_wr = true;
		spi_int_fired = IRQ_HANDLED;
	}
	writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR);
	spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);

	/* Clear the SPI GO_BIT interrupt */
	regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
	if (regval & SPI_INTR) {
		writel(p->hw_inst, p->parent->dma_offset_bar + SPI_DMA_WR_DOORBELL_REG);
		spi_int_fired = IRQ_HANDLED;
	}
	writel(regval, p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
	return spi_int_fired;
}

static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
{
	struct pci1xxxx_spi_internal *p = dev;

	if (p->spi_host->can_dma(p->spi_host, NULL, p->xfer))
		return pci1xxxx_spi_isr_dma(irq, dev);
	else
		return pci1xxxx_spi_isr_io(irq, dev);
}

static bool pci1xxxx_spi_can_dma(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct pci1xxxx_spi_internal *p = spi_controller_get_devdata(host);
	struct pci1xxxx_spi *par = p->parent;

	return par->can_dma;
}

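/*
 * The first loop iteration performs the setup shared by all instances
 * (device enable, BAR mapping, MSI vector allocation and optional DMA
 * engine bring-up); every iteration then registers its own
 * spi_controller with a per-instance IRQ and completion.
 */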
static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	u8 hw_inst_cnt, iter, start, only_sec_inst;
	struct pci1xxxx_spi_internal *spi_sub_ptr;
	struct device *dev = &pdev->dev;
	struct pci1xxxx_spi *spi_bus;
	struct spi_controller *spi_host;
	u32 regval;
	int ret;

	hw_inst_cnt = ent->driver_data & 0x0f;
	start = (ent->driver_data & 0xf0) >> 4;
	if (start == 1)
		only_sec_inst = 1;
	else
		only_sec_inst = 0;

	spi_bus = devm_kzalloc(&pdev->dev,
			       struct_size(spi_bus, spi_int, hw_inst_cnt),
			       GFP_KERNEL);
	if (!spi_bus)
		return -ENOMEM;

	spi_bus->dev = pdev;
	spi_bus->total_hw_instances = hw_inst_cnt;
	pci_set_master(pdev);

	for (iter = 0; iter < hw_inst_cnt; iter++) {
		spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
						      sizeof(struct pci1xxxx_spi_internal),
						      GFP_KERNEL);
		if (!spi_bus->spi_int[iter])
			return -ENOMEM;

		spi_sub_ptr = spi_bus->spi_int[iter];
		spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
		if (!spi_sub_ptr->spi_host)
			return -ENOMEM;

		spi_sub_ptr->parent = spi_bus;
		spi_sub_ptr->spi_xfer_in_progress = false;

		if (!iter) {
			ret = pcim_enable_device(pdev);
			if (ret)
				return ret;

			ret = pci_request_regions(pdev, DRV_NAME);
			if (ret)
				return ret;

			spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
			if (!spi_bus->reg_base) {
				ret = -EINVAL;
				goto error;
			}

			ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
						    PCI_IRQ_ALL_TYPES);
			if (ret < 0) {
				dev_err(&pdev->dev, "Error allocating MSI vectors\n");
				goto error;
			}

			init_completion(&spi_sub_ptr->spi_xfer_done);
			/* Initialize Interrupts - SPI_INT */
			regval = readl(spi_bus->reg_base +
				       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
			regval &= ~SPI_INTR;
			writel(regval, spi_bus->reg_base +
			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
			spi_sub_ptr->irq = pci_irq_vector(pdev, 0);

			ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
					       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
					       pci_name(pdev), spi_sub_ptr);
			if (ret < 0) {
				dev_err(&pdev->dev, "Unable to request irq : %d",
					spi_sub_ptr->irq);
				ret = -ENODEV;
				goto error;
			}

			ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq);
			if (ret && ret != -EOPNOTSUPP)
				goto error;

			/* This register is only applicable for 1st instance */
			regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
			if (!only_sec_inst)
				regval |= (BIT(4));
			else
				regval &= ~(BIT(4));

			writel(regval, spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
		}

		spi_sub_ptr->hw_inst = start++;

		if (iter == 1) {
			init_completion(&spi_sub_ptr->spi_xfer_done);
			/* Initialize Interrupts - SPI_INT */
			regval = readl(spi_bus->reg_base +
				       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
			regval &= ~SPI_INTR;
			writel(regval, spi_bus->reg_base +
			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
			spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
			ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
					       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
					       pci_name(pdev), spi_sub_ptr);
			if (ret < 0) {
				dev_err(&pdev->dev, "Unable to request irq : %d",
					spi_sub_ptr->irq);
				ret = -ENODEV;
				goto error;
			}
		}

		spi_host = spi_sub_ptr->spi_host;
		spi_host->num_chipselect = SPI_CHIP_SEL_COUNT;
		spi_host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_RX_DUAL |
				      SPI_TX_DUAL | SPI_LOOP;
		spi_host->can_dma = pci1xxxx_spi_can_dma;
		spi_host->transfer_one = pci1xxxx_spi_transfer_one;

		spi_host->set_cs = pci1xxxx_spi_set_cs;
		spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
		spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
		spi_host->min_speed_hz = PCI1XXXX_SPI_MIN_CLOCK_HZ;
		spi_host->flags = SPI_CONTROLLER_MUST_TX;
		spi_controller_set_devdata(spi_host, spi_sub_ptr);
		ret = devm_spi_register_controller(dev, spi_host);
		if (ret)
			goto error;
	}
	pci_set_drvdata(pdev, spi_bus);

	return 0;

error:
	pci_release_regions(pdev);
	return ret;
}

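/*
 * Save (store == true) or restore the DEV_SEL field of SPI_MST_CTL and
 * the MSI vector selection bit of SPI_PCI_CTRL around suspend/resume.
 */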
static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
				 struct pci1xxxx_spi_internal *spi_sub_ptr,
				 u8 inst, bool store)
{
	u32 regval;

	if (store) {
		regval = readl(spi_ptr->reg_base +
			       SPI_MST_CTL_REG_OFFSET(spi_sub_ptr->hw_inst));
		regval &= SPI_MST_CTL_DEVSEL_MASK;
		spi_sub_ptr->prev_val.dev_sel = (regval >> 25) & 7;
		regval = readl(spi_ptr->reg_base +
			       SPI_PCI_CTRL_REG_OFFSET(spi_sub_ptr->hw_inst));
		regval &= SPI_MSI_VECTOR_SEL_MASK;
		spi_sub_ptr->prev_val.msi_vector_sel = (regval >> 4) & 1;
	} else {
		regval = readl(spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
		regval &= ~SPI_MST_CTL_DEVSEL_MASK;
		regval |= (spi_sub_ptr->prev_val.dev_sel << 25);
		writel(regval,
		       spi_ptr->reg_base + SPI_MST_CTL_REG_OFFSET(inst));
		writel((spi_sub_ptr->prev_val.msi_vector_sel << 4),
		       spi_ptr->reg_base + SPI_PCI_CTRL_REG_OFFSET(inst));
	}
}

static int pci1xxxx_spi_resume(struct device *dev)
{
	struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
	struct pci1xxxx_spi_internal *spi_sub_ptr;
	u32 regval = SPI_RESUME_CONFIG;
	u8 iter;

	for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
		spi_sub_ptr = spi_ptr->spi_int[iter];
		spi_controller_resume(spi_sub_ptr->spi_host);
		writel(regval, spi_ptr->reg_base +
		       SPI_MST_EVENT_MASK_REG_OFFSET(iter));

		/* Restore config at resume */
		store_restore_config(spi_ptr, spi_sub_ptr, iter, 0);
	}

	return 0;
}

static int pci1xxxx_spi_suspend(struct device *dev)
{
	struct pci1xxxx_spi *spi_ptr = dev_get_drvdata(dev);
	struct pci1xxxx_spi_internal *spi_sub_ptr;
	u32 reg1 = SPI_SUSPEND_CONFIG;
	u8 iter;

	for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
		spi_sub_ptr = spi_ptr->spi_int[iter];

		/* Wait for any in-flight transfer to finish */
		while (spi_sub_ptr->spi_xfer_in_progress)
			msleep(20);

		/* Store existing config before suspend */
		store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
		spi_controller_suspend(spi_sub_ptr->spi_host);
		writel(reg1, spi_ptr->reg_base +
		       SPI_MST_EVENT_MASK_REG_OFFSET(iter));
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(spi_pm_ops, pci1xxxx_spi_suspend,
				pci1xxxx_spi_resume);

static struct pci_driver pci1xxxx_spi_driver = {
	.name = DRV_NAME,
	.id_table = pci1xxxx_spi_pci_id_table,
	.probe = pci1xxxx_spi_probe,
	.driver = {
		.pm = pm_sleep_ptr(&spi_pm_ops),
	},
};

module_pci_driver(pci1xxxx_spi_driver);

MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx SPI bus driver");
MODULE_AUTHOR("Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>");
MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
MODULE_LICENSE("GPL v2");