// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2015 Xilinx Inc.
 * Copyright (c) 2015, National Instruments Corp.
 *
 * FPGA Manager Driver for Xilinx Zynq, heavily based on xdevcfg driver
 * in their vendor tree.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

/* Offsets into SLCR regmap */

/* FPGA Software Reset Control */
#define SLCR_FPGA_RST_CTRL_OFFSET	0x240
/* Level Shifters Enable */
#define SLCR_LVL_SHFTR_EN_OFFSET	0x900

/* Constant Definitions */

/* Control Register */
#define CTRL_OFFSET			0x00
/* Lock Register */
#define LOCK_OFFSET			0x04
/* Interrupt Status Register */
#define INT_STS_OFFSET			0x0c
/* Interrupt Mask Register */
#define INT_MASK_OFFSET			0x10
/* Status Register */
#define STATUS_OFFSET			0x14
/* DMA Source Address Register */
#define DMA_SRC_ADDR_OFFSET		0x18
/* DMA Destination Address Reg */
#define DMA_DST_ADDR_OFFSET		0x1c
/* DMA Source Transfer Length */
#define DMA_SRC_LEN_OFFSET		0x20
/* DMA Destination Transfer Length */
#define DMA_DEST_LEN_OFFSET		0x24
/* Unlock Register */
#define UNLOCK_OFFSET			0x34
/* Misc. Control Register */
#define MCTRL_OFFSET			0x80

/* Control Register Bit definitions */

/* Signal to reset FPGA */
#define CTRL_PCFG_PROG_B_MASK		BIT(30)
/* Enable PCAP for PR */
#define CTRL_PCAP_PR_MASK		BIT(27)
/* Enable PCAP */
#define CTRL_PCAP_MODE_MASK		BIT(26)
/* Lower rate to allow decrypt on the fly */
#define CTRL_PCAP_RATE_EN_MASK		BIT(25)
/* System booted in secure mode */
#define CTRL_SEC_EN_MASK		BIT(7)

/* Miscellaneous Control Register bit definitions */
/* Internal PCAP loopback */
#define MCTRL_PCAP_LPBK_MASK		BIT(4)

/* Status register bit definitions */

/* FPGA init status */
#define STATUS_DMA_Q_F			BIT(31)
#define STATUS_DMA_Q_E			BIT(30)
#define STATUS_PCFG_INIT_MASK		BIT(4)

/* Interrupt Status/Mask Register Bit definitions */
/* DMA command done */
#define IXR_DMA_DONE_MASK		BIT(13)
/* DMA and PCAP cmd done */
#define IXR_D_P_DONE_MASK		BIT(12)
/* FPGA programmed */
#define IXR_PCFG_DONE_MASK		BIT(2)
/* Error interrupt bits */
#define IXR_ERROR_FLAGS_MASK		0x00F0C860
/* All interrupt status bits */
#define IXR_ALL_MASK			0xF8F7F87F

/* Miscellaneous constant values */

/* Invalid DMA addr */
#define DMA_INVALID_ADDRESS		GENMASK(31, 0)
/* Used to unlock the dev */
#define UNLOCK_MASK			0x757bdf0d
/* Timeout for polling reset bits */
#define INIT_POLL_TIMEOUT		2500000
/* Delay for polling reset bits */
#define INIT_POLL_DELAY			20
/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
 * interrupting
 */
#define DMA_SRC_LAST_TRANSFER		1
/* Timeout for DMA completion */
#define DMA_TIMEOUT_MS			5000

/* Masks for the level shifter and PL reset controls in the SLCR */
/* Disable all Level shifters */
#define LVL_SHFTR_DISABLE_ALL_MASK	0x0
/* Enable Level shifters from PS to PL */
#define LVL_SHFTR_ENABLE_PS_TO_PL	0xa
/* Enable Level shifters from PL to PS */
#define LVL_SHFTR_ENABLE_PL_TO_PS	0xf

/* Enable global resets */
#define FPGA_RST_ALL_MASK		0xf
/* Disable global resets */
#define FPGA_RST_NONE_MASK		0x0

struct zynq_fpga_priv {
	int irq;
	struct clk *clk;

	void __iomem *io_base;
	struct regmap *slcr;

	spinlock_t dma_lock;
	unsigned int dma_elm;
	unsigned int dma_nelms;
	struct scatterlist *cur_sg;

	struct completion dma_done;
};

static inline void zynq_fpga_write(struct zynq_fpga_priv *priv, u32 offset,
				   u32 val)
{
	writel(val, priv->io_base + offset);
}

static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
				 u32 offset)
{
	return readl(priv->io_base + offset);
}

#define zynq_fpga_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
			   timeout_us)

/* Cause the specified irq mask bits to generate IRQs */
static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
{
	zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
}

/* Must be called with dma_lock held */
static void zynq_step_dma(struct zynq_fpga_priv *priv)
{
	u32 addr;
	u32 len;
	bool first;

	first = priv->dma_elm == 0;
	while (priv->cur_sg) {
		/* Feed the DMA queue until it is full. */
		if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
			break;

		addr = sg_dma_address(priv->cur_sg);
		len = sg_dma_len(priv->cur_sg);
		if (priv->dma_elm + 1 == priv->dma_nelms) {
			/* The last transfer waits for the PCAP to finish too,
			 * notice this also changes the irq_mask to ignore
			 * IXR_DMA_DONE_MASK which ensures we do not trigger
			 * the completion too early.
			 */
			addr |= DMA_SRC_LAST_TRANSFER;
			priv->cur_sg = NULL;
		} else {
			priv->cur_sg = sg_next(priv->cur_sg);
			priv->dma_elm++;
		}

		zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
		zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
		zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
		zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
	}

	/* Once the first transfer is queued we can turn on the ISR, future
	 * calls to zynq_step_dma will happen from the ISR context. The
	 * dma_lock spinlock guarantees this handover is done coherently, the
	 * ISR enable is put at the end to avoid another CPU spinning in the
	 * ISR on this lock.
	 */
	if (first && priv->cur_sg) {
		zynq_fpga_set_irq(priv,
				  IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	} else if (!priv->cur_sg) {
		/* The last transfer changes to DMA & PCAP mode since we do
		 * not want to continue until everything has been flushed into
		 * the PCAP.
		 */
		zynq_fpga_set_irq(priv,
				  IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	}
}

static irqreturn_t zynq_fpga_isr(int irq, void *data)
{
	struct zynq_fpga_priv *priv = data;
	u32 intr_status;

	/* If anything other than DMA completion is reported stop and hand
	 * control back to zynq_fpga_ops_write, something went wrong,
	 * otherwise progress the DMA.
	 */
	spin_lock(&priv->dma_lock);
	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
	    (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
		zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
		zynq_step_dma(priv);
		spin_unlock(&priv->dma_lock);
		return IRQ_HANDLED;
	}
	spin_unlock(&priv->dma_lock);

	zynq_fpga_set_irq(priv, 0);
	complete(&priv->dma_done);

	return IRQ_HANDLED;
}

/* Sanity check the proposed bitstream. It must start with the sync word in
 * the correct byte order, and be dword aligned. The input is a Xilinx .bin
 * file with every 32 bit quantity swapped.
 */
static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
{
	for (; count >= 4; buf += 4, count -= 4)
		if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
		    buf[3] == 0xaa)
			return true;
	return false;
}

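/* Prepare the PL for programming. For a full reconfiguration this verifies
 * that the image contains a sync word, asserts the PL resets, switches the
 * level shifters to PS-to-PL only and pulses PCFG_PROG_B per UG585 to clear
 * the current configuration. In both the full and partial case the PCAP
 * interface is then enabled and the DMA command queue is checked to be empty.
 */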
static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
				    struct fpga_image_info *info,
				    const char *buf, size_t count)
{
	struct zynq_fpga_priv *priv;
	u32 ctrl, status;
	int err;

	priv = mgr->priv;

	err = clk_enable(priv->clk);
	if (err)
		return err;

	/* check if the bitstream is encrypted and the system is still secure */
	if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		if (!(ctrl & CTRL_SEC_EN_MASK)) {
			dev_err(&mgr->dev,
				"System not secure, can't use crypted bitstreams\n");
			err = -EINVAL;
			goto out_err;
		}
	}

	/* don't globally reset PL if we're doing partial reconfig */
	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
		if (!zynq_fpga_has_sync(buf, count)) {
			dev_err(&mgr->dev,
				"Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
			err = -EINVAL;
			goto out_err;
		}

		/* assert AXI interface resets */
		regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
			     FPGA_RST_ALL_MASK);

		/* disable all level shifters */
		regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
			     LVL_SHFTR_DISABLE_ALL_MASK);
		/* enable level shifters from PS to PL */
		regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
			     LVL_SHFTR_ENABLE_PS_TO_PL);

		/* create a rising edge on PCFG_INIT. PCFG_INIT follows
		 * PCFG_PROG_B, so we need to poll it after setting PCFG_PROG_B
		 * to make sure the rising edge actually happens.
		 * Note: PCFG_PROG_B is low active, sequence as described in
		 * UG585 v1.10 page 211
		 */
		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl |= CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     status & STATUS_PCFG_INIT_MASK,
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
			goto out_err;
		}

		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl &= ~CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     !(status & STATUS_PCFG_INIT_MASK),
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
			goto out_err;
		}

		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl |= CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     status & STATUS_PCFG_INIT_MASK,
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
			goto out_err;
		}
	}

	/* set configuration register with following options:
	 * - enable PCAP interface
	 * - set throughput for maximum speed (if bitstream is not encrypted)
	 * - set CPU in user mode
	 */
	ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
	if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM)
		zynq_fpga_write(priv, CTRL_OFFSET,
				(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
				 | CTRL_PCAP_RATE_EN_MASK | ctrl));
	else
		zynq_fpga_write(priv, CTRL_OFFSET,
				(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
				 | ctrl));

	/* We expect that the command queue is empty right now. */
	status = zynq_fpga_read(priv, STATUS_OFFSET);
	if ((status & STATUS_DMA_Q_F) ||
	    (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
		dev_err(&mgr->dev, "DMA command queue not right\n");
		err = -EBUSY;
		goto out_err;
	}

	/* ensure internal PCAP loopback is disabled */
	ctrl = zynq_fpga_read(priv, MCTRL_OFFSET);
	zynq_fpga_write(priv, MCTRL_OFFSET, (~MCTRL_PCAP_LPBK_MASK & ctrl));

	clk_disable(priv->clk);

	return 0;

out_err:
	clk_disable(priv->clk);

	return err;
}

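/* Stream the bitstream into the PCAP via the devcfg DMA engine. The
 * scatterlist is DMA mapped and the first chunks are queued here under
 * dma_lock; zynq_fpga_isr() feeds the remaining chunks from interrupt
 * context and signals dma_done once the last transfer (and the PCAP) has
 * finished or an error is reported.
 */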
static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
{
	struct zynq_fpga_priv *priv;
	const char *why;
	int err;
	u32 intr_status;
	unsigned long timeout;
	unsigned long flags;
	struct scatterlist *sg;
	int i;

	priv = mgr->priv;

	/* The hardware can only DMA multiples of 4 bytes, and it requires the
	 * starting addresses to be aligned to 64 bits (UG585 pg 212).
	 */
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if ((sg->offset % 8) || (sg->length % 4)) {
			dev_err(&mgr->dev,
				"Invalid bitstream, chunks must be aligned\n");
			return -EINVAL;
		}
	}

	priv->dma_nelms =
	    dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (priv->dma_nelms == 0) {
		dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
		return -ENOMEM;
	}

	/* enable clock */
	err = clk_enable(priv->clk);
	if (err)
		goto out_free;

	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
	reinit_completion(&priv->dma_done);

	/* zynq_step_dma will turn on interrupts */
	spin_lock_irqsave(&priv->dma_lock, flags);
	priv->dma_elm = 0;
	priv->cur_sg = sgt->sgl;
	zynq_step_dma(priv);
	spin_unlock_irqrestore(&priv->dma_lock, flags);

	timeout = wait_for_completion_timeout(&priv->dma_done,
					      msecs_to_jiffies(DMA_TIMEOUT_MS));

	spin_lock_irqsave(&priv->dma_lock, flags);
	zynq_fpga_set_irq(priv, 0);
	priv->cur_sg = NULL;
	spin_unlock_irqrestore(&priv->dma_lock, flags);

	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);

	/* There doesn't seem to be a way to force cancel any DMA, so if
	 * something went wrong we are relying on the hardware to have halted
	 * the DMA before we get here; if there were a way to cancel we could
	 * also use wait_for_completion_interruptible.
	 */

	if (intr_status & IXR_ERROR_FLAGS_MASK) {
		why = "DMA reported error";
		err = -EIO;
		goto out_report;
	}

	if (priv->cur_sg ||
	    !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
		if (timeout == 0)
			why = "DMA timed out";
		else
			why = "DMA did not complete";
		err = -EIO;
		goto out_report;
	}

	err = 0;
	goto out_clk;

out_report:
	dev_err(&mgr->dev,
		"%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
		why,
		intr_status,
		zynq_fpga_read(priv, CTRL_OFFSET),
		zynq_fpga_read(priv, LOCK_OFFSET),
		zynq_fpga_read(priv, INT_MASK_OFFSET),
		zynq_fpga_read(priv, STATUS_OFFSET),
		zynq_fpga_read(priv, MCTRL_OFFSET));

out_clk:
	clk_disable(priv->clk);

out_free:
	dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	return err;
}

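/* Finish programming: hand 'PR' control back to the ICAP and wait for the
 * PCAP to report PCFG_DONE. For a full reconfiguration the PL-to-PS level
 * shifters are then enabled and the AXI interface resets are deasserted.
 */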
static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
					struct fpga_image_info *info)
{
	struct zynq_fpga_priv *priv = mgr->priv;
	int err;
	u32 intr_status;

	err = clk_enable(priv->clk);
	if (err)
		return err;

	/* Release 'PR' control back to the ICAP */
	zynq_fpga_write(priv, CTRL_OFFSET,
			zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);

	err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
				     intr_status & IXR_PCFG_DONE_MASK,
				     INIT_POLL_DELAY,
				     INIT_POLL_TIMEOUT);

	clk_disable(priv->clk);

	if (err)
		return err;

	/* for the partial reconfig case we didn't touch the level shifters */
	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
		/* enable level shifters from PL to PS */
		regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
			     LVL_SHFTR_ENABLE_PL_TO_PS);

		/* deassert AXI interface resets */
		regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
			     FPGA_RST_NONE_MASK);
	}

	return 0;
}

static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
{
	int err;
	u32 intr_status;
	struct zynq_fpga_priv *priv;

	priv = mgr->priv;

	err = clk_enable(priv->clk);
	if (err)
		return FPGA_MGR_STATE_UNKNOWN;

	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	clk_disable(priv->clk);

	if (intr_status & IXR_PCFG_DONE_MASK)
		return FPGA_MGR_STATE_OPERATING;

	return FPGA_MGR_STATE_UNKNOWN;
}

static const struct fpga_manager_ops zynq_fpga_ops = {
	.initial_header_size = 128,
	.state = zynq_fpga_ops_state,
	.write_init = zynq_fpga_ops_write_init,
	.write_sg = zynq_fpga_ops_write,
	.write_complete = zynq_fpga_ops_write_complete,
};

static int zynq_fpga_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct zynq_fpga_priv *priv;
	struct fpga_manager *mgr;
	struct resource *res;
	int err;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->dma_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->io_base))
		return PTR_ERR(priv->io_base);

	priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
						     "syscon");
	if (IS_ERR(priv->slcr)) {
		dev_err(dev, "unable to get zynq-slcr regmap\n");
		return PTR_ERR(priv->slcr);
	}

	init_completion(&priv->dma_done);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	priv->clk = devm_clk_get(dev, "ref_clk");
	if (IS_ERR(priv->clk)) {
		if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
			dev_err(dev, "input clock not found\n");
		return PTR_ERR(priv->clk);
	}

	err = clk_prepare_enable(priv->clk);
	if (err) {
		dev_err(dev, "unable to enable clock\n");
		return err;
	}

	/* unlock the device */
	zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);

	zynq_fpga_set_irq(priv, 0);
	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
	err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
			       priv);
	if (err) {
		dev_err(dev, "unable to request IRQ\n");
		clk_disable_unprepare(priv->clk);
		return err;
	}

	clk_disable(priv->clk);

	mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
				   &zynq_fpga_ops, priv);
	if (!mgr)
		return -ENOMEM;

	platform_set_drvdata(pdev, mgr);

	err = fpga_mgr_register(mgr);
	if (err) {
		dev_err(dev, "unable to register FPGA manager\n");
		clk_unprepare(priv->clk);
		return err;
	}

	return 0;
}

static int zynq_fpga_remove(struct platform_device *pdev)
{
	struct zynq_fpga_priv *priv;
	struct fpga_manager *mgr;

	mgr = platform_get_drvdata(pdev);
	priv = mgr->priv;

	fpga_mgr_unregister(mgr);

	clk_unprepare(priv->clk);

	return 0;
}

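/* Device tree binding, as consumed by zynq_fpga_probe() above: a node
 * compatible with "xlnx,zynq-devcfg-1.0" providing the devcfg register
 * range, its interrupt, a "ref_clk" clock and a "syscon" phandle to the
 * SLCR used for the level shifter and reset controls.
 */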
#ifdef CONFIG_OF
static const struct of_device_id zynq_fpga_of_match[] = {
	{ .compatible = "xlnx,zynq-devcfg-1.0", },
	{},
};

MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
#endif

static struct platform_driver zynq_fpga_driver = {
	.probe = zynq_fpga_probe,
	.remove = zynq_fpga_remove,
	.driver = {
		.name = "zynq_fpga_manager",
		.of_match_table = of_match_ptr(zynq_fpga_of_match),
	},
};

module_platform_driver(zynq_fpga_driver);

MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
MODULE_DESCRIPTION("Xilinx Zynq FPGA Manager");
MODULE_LICENSE("GPL v2");