/*
 * RapidIO mport driver for Tsi721 PCI Express-to-SRIO bridge
 *
 * Copyright 2011 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 * Chul Kim <chul.kim@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>

#include "tsi721.h"

#define DEBUG_PW	/* Inbound Port-Write debugging */

static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);

/**
 * tsi721_lcread - read from local SREP config space
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be read into
 *
 * Generates a local SREP space read. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
			 int len, u32 *data)
{
	struct tsi721_device *priv = mport->priv;

	if (len != sizeof(u32))
		return -EINVAL; /* only 32-bit access is supported */

	*data = ioread32(priv->regs + offset);

	return 0;
}

/**
 * tsi721_lcwrite - write into local SREP config space
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates a local write into SREP configuration space. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
			  int len, u32 data)
{
	struct tsi721_device *priv = mport->priv;

	if (len != sizeof(u32))
		return -EINVAL; /* only 32-bit access is supported */

	iowrite32(data, priv->regs + offset);

	return 0;
}
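/*
 * Note: local config access goes straight through the memory-mapped SREP
 * register window (priv->regs), so RapidIO maintenance offsets map 1:1
 * onto ioread32()/iowrite32() offsets and only aligned 32-bit accesses
 * are accepted.
 */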
/**
 * tsi721_maint_dma - Helper function to generate RapidIO maintenance
 *                    transactions using designated Tsi721 DMA channel.
 * @priv: pointer to tsi721 private data
 * @sys_size: RapidIO transport system size
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to be read from or written into
 * @do_wr: Operation flag (1 == MAINT_WR)
 *
 * Generates a RapidIO maintenance transaction (Read or Write).
 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 */
static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
			    u16 destid, u8 hopcount, u32 offset, int len,
			    u32 *data, int do_wr)
{
	struct tsi721_dma_desc *bd_ptr;
	u32 rd_count, swr_ptr, ch_stat;
	int i, err = 0;
	u32 op = do_wr ? MAINT_WR : MAINT_RD;

	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
		return -EINVAL;

	bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;

	rd_count = ioread32(
			priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));

	/* Initialize DMA descriptor */
	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
	bd_ptr[0].raddr_hi = 0;
	if (do_wr)
		bd_ptr[0].data[0] = cpu_to_be32p(data);
	else
		bd_ptr[0].data[0] = 0xffffffff;

	mb();

	/* Start DMA operation */
	iowrite32(rd_count + 2,
		priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
	ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
	i = 0;

	/* Wait until DMA transfer is finished */
	while ((ch_stat = ioread32(priv->regs +
		TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
		udelay(1);
		if (++i >= 5000000) {
			dev_dbg(&priv->pdev->dev,
				"%s : DMA[%d] read timeout ch_status=%x\n",
				__func__, TSI721_DMACH_MAINT, ch_stat);
			if (!do_wr)
				*data = 0xffffffff;
			err = -EIO;
			goto err_out;
		}
	}

	if (ch_stat & TSI721_DMAC_STS_ABORT) {
		/* If DMA operation aborted due to error,
		 * reinitialize DMA channel
		 */
		dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n",
			__func__, ch_stat);
		dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
			do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
		iowrite32(TSI721_DMAC_INT_ALL,
			priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
		iowrite32(TSI721_DMAC_CTL_INIT,
			priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
		udelay(10);
		iowrite32(0, priv->regs +
				TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
		udelay(1);
		if (!do_wr)
			*data = 0xffffffff;
		err = -EIO;
		goto err_out;
	}

	if (!do_wr)
		*data = be32_to_cpu(bd_ptr[0].data[0]);

	/*
	 * Update descriptor status FIFO RD pointer.
	 * NOTE: Skipping check and clear FIFO entries because we are waiting
	 * for transfer to be completed.
	 */
	swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
	iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
err_out:

	return err;
}
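/*
 * Illustrative example: a maintenance read of a remote 32-bit register at
 * config offset 0x6c, one hop away, is encoded by tsi721_maint_dma() as a
 * single DTYPE2 descriptor:
 *
 *	type_id  = (DTYPE2 << 29) | (MAINT_RD << 19) | destid
 *	bcount   = (sys_size << 26) | 0x04	(4-byte transfer)
 *	raddr_lo = (1 << 24) | 0x6c		(hopcount = 1)
 *
 * The helper then bumps the channel write counter by two (descriptor plus
 * ring link) and polls TSI721_DMAC_STS until the RUN bit clears.
 */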
/**
 * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
 *                    using Tsi721 BDMA engine.
 * @mport: RapidIO master port control structure
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to be read into
 *
 * Generates a RapidIO maintenance read transaction.
 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 */
static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
			    u8 hopcount, u32 offset, int len, u32 *data)
{
	struct tsi721_device *priv = mport->priv;

	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
				offset, len, data, 0);
}

/**
 * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
 *                     using Tsi721 BDMA engine
 * @mport: RapidIO master port control structure
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates a RapidIO maintenance write transaction.
 * Returns %0 on success and %-EINVAL or %-EIO on failure.
 */
static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
			     u8 hopcount, u32 offset, int len, u32 data)
{
	struct tsi721_device *priv = mport->priv;
	u32 temp = data;

	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
				offset, len, &temp, 1);
}
/**
 * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
 * @mport: RapidIO master port structure
 *
 * Handles inbound port-write interrupts. Copies PW message from an internal
 * buffer into PW message FIFO and schedules deferred routine to process
 * queued messages.
 */
static int
tsi721_pw_handler(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	u32 pw_stat;
	u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];

	pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);

	if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
		pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
		pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
		pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
		pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));

		/* Queue PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		spin_lock(&priv->pw_fifo_lock);
		if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
			kfifo_in(&priv->pw_fifo, pw_buf,
						TSI721_RIO_PW_MSG_SIZE);
		else
			priv->pw_discard_count++;
		spin_unlock(&priv->pw_fifo_lock);
	}

	/* Clear pending PW interrupts */
	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
		  priv->regs + TSI721_RIO_PW_RX_STAT);

	schedule_work(&priv->pw_work);

	return 0;
}

static void tsi721_pw_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						    pw_work);
	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message
							buffer for RIO layer */

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer,
			 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
		/* Process one message */
#ifdef DEBUG_PW
		{
		u32 i;
		pr_debug("%s : Port-Write Message:", __func__);
		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) {
			pr_debug("0x%02x: %08x %08x %08x %08x", i*4,
				msg_buffer[i], msg_buffer[i + 1],
				msg_buffer[i + 2], msg_buffer[i + 3]);
			i += 4;
		}
		pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
	}
}

/**
 * tsi721_pw_enable - enable/disable port-write interface init
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
static int tsi721_pw_enable(struct rio_mport *mport, int enable)
{
	struct tsi721_device *priv = mport->priv;
	u32 rval;

	rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);

	if (enable)
		rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
	else
		rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;

	/* Clear pending PW interrupts */
	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
		  priv->regs + TSI721_RIO_PW_RX_STAT);
	/* Update enable bits */
	iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);

	return 0;
}

/**
 * tsi721_dsend - Send a RapidIO doorbell
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell
 *
 * Sends a RapidIO doorbell message. Always returns %0.
 */
static int tsi721_dsend(struct rio_mport *mport, int index,
			u16 destid, u16 data)
{
	struct tsi721_device *priv = mport->priv;
	u32 offset;

	offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
		 (destid << 2);

	dev_dbg(&priv->pdev->dev,
		"Send Doorbell 0x%04x to destID 0x%x\n", data, destid);
	iowrite16be(data, priv->odb_base + offset);

	return 0;
}
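/*
 * Outbound doorbells are posted by a single 16-bit write into the ODB
 * window (priv->odb_base, mapped from BAR1 at probe time): bits 19:18 of
 * the window offset select the transport type and bits 17:2 carry the
 * destination ID. For example, destid 0x05 in a small (8-bit ID) system
 * lands at offset (RIO_TT_CODE_8 << 18) | (0x05 << 2).
 */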
/**
 * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
 * @mport: RapidIO master port structure
 *
 * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
 * buffer into DB message FIFO and schedules deferred routine to process
 * queued DBs.
 */
static int
tsi721_dbell_handler(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	u32 regval;

	/* Disable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval &= ~TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	schedule_work(&priv->idb_work);

	return 0;
}

static void tsi721_db_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						    idb_work);
	struct rio_mport *mport;
	struct rio_dbell *dbell;
	int found;
	u32 wr_ptr, rd_ptr;
	u64 *idb_entry;
	u32 regval;
	union {
		u64 msg;
		u8  bytes[8];
	} idb;

	/*
	 * Process queued inbound doorbells
	 */
	mport = priv->mport;

	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	while (wr_ptr != rd_ptr) {
		idb_entry = (u64 *)(priv->idb_base +
				(TSI721_IDB_ENTRY_SIZE * rd_ptr));
		rd_ptr++;
		rd_ptr %= IDB_QSIZE;
		idb.msg = *idb_entry;
		*idb_entry = 0;
		found = 0;

		/* Process one doorbell */
		list_for_each_entry(dbell, &mport->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
			    (dbell->res->end >= DBELL_INF(idb.bytes))) {
				found = 1;
				break;
			}
		}

		if (found) {
			dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
				    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
		} else {
			dev_dbg(&priv->pdev->dev,
				"spurious inb doorbell, sid %2.2x tid %2.2x"
				" info %4.4x\n", DBELL_SID(idb.bytes),
				DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
		}
	}

	iowrite32(rd_ptr & (IDB_QSIZE - 1),
		priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	/* Re-enable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval |= TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
}
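/*
 * Each inbound doorbell queue entry is a 64-bit record from which the
 * DBELL_SID/DBELL_TID/DBELL_INF accessors extract the source ID, target ID
 * and 16-bit info field. The hardware write pointer (TSI721_IDQ_WP) runs
 * ahead of the software read pointer (TSI721_IDQ_RP); both wrap modulo
 * IDB_QSIZE, which is why the read pointer is masked with (IDB_QSIZE - 1)
 * before it is written back above.
 */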
/**
 * tsi721_irqhandler - Tsi721 interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
 * interrupt events and calls an event-specific handler(s).
 */
static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
{
	struct rio_mport *mport = (struct rio_mport *)ptr;
	struct tsi721_device *priv = mport->priv;
	u32 dev_int;
	u32 dev_ch_int;
	u32 intval;
	u32 ch_inte;

	dev_int = ioread32(priv->regs + TSI721_DEV_INT);
	if (!dev_int)
		return IRQ_NONE;

	dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);

	if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
		/* Service SR2PC Channel interrupts */
		if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
			/* Service Inbound Doorbell interrupt */
			intval = ioread32(priv->regs +
						TSI721_SR_CHINT(IDB_QUEUE));
			if (intval & TSI721_SR_CHINT_IDBQRCV)
				tsi721_dbell_handler(mport);
			else
				dev_info(&priv->pdev->dev,
					"Unsupported SR_CH_INT %x\n", intval);

			/* Clear interrupts */
			iowrite32(intval,
				priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
			ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
		}
	}

	if (dev_int & TSI721_DEV_INT_SMSG_CH) {
		int ch;

		/*
		 * Service channel interrupts from Messaging Engine
		 */

		if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
			/* Disable signaled IB MSG Channel interrupts */
			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
			ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);

			/*
			 * Process Inbound Message interrupt for each MBOX
			 */
			for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
				if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
					continue;
				tsi721_imsg_handler(priv, ch);
			}
		}

		if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
			/* Disable signaled OB MSG Channel interrupts */
			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
			ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);

			/*
			 * Process Outbound Message interrupts for each MBOX
			 */
			for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
				if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
					continue;
				tsi721_omsg_handler(priv, ch);
			}
		}
	}

	if (dev_int & TSI721_DEV_INT_SRIO) {
		/* Service SRIO MAC interrupts */
		intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
		if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
			tsi721_pw_handler(mport);
	}

	return IRQ_HANDLED;
}

static void tsi721_interrupts_init(struct tsi721_device *priv)
{
	u32 intr;

	/* Enable IDB interrupts */
	iowrite32(TSI721_SR_CHINT_ALL,
		priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	iowrite32(TSI721_SR_CHINT_IDBQRCV,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
		priv->regs + TSI721_DEV_CHAN_INTE);

	/* Enable SRIO MAC interrupts */
	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
		priv->regs + TSI721_RIO_EM_DEV_INT_EN);

	if (priv->flags & TSI721_USING_MSIX)
		intr = TSI721_DEV_INT_SRIO;
	else
		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
			TSI721_DEV_INT_SMSG_CH;

	iowrite32(intr, priv->regs + TSI721_DEV_INTE);
	ioread32(priv->regs + TSI721_DEV_INTE);
}
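/*
 * Interrupt status on the Tsi721 is hierarchical: TSI721_DEV_INT reports
 * which functional block (SR2PC, messaging, SRIO MAC) raised an event,
 * TSI721_DEV_CHAN_INT narrows it down to a channel, and the per-channel
 * status register identifies the actual condition. The INTA/MSI handler
 * above walks this tree top-down; in MSI-X mode each leaf gets its own
 * vector and only the SRIO MAC event remains enabled at the device level
 * (see tsi721_interrupts_init()).
 */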
#ifdef CONFIG_PCI_MSI
/**
 * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles outbound messaging interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	int mbox;

	mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
	tsi721_omsg_handler(priv, mbox);
	return IRQ_HANDLED;
}

/**
 * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles inbound messaging interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	int mbox;

	mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
	tsi721_imsg_handler(priv, mbox + 4);
	return IRQ_HANDLED;
}

/**
 * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts from SRIO MAC.
 */
static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	u32 srio_int;

	/* Service SRIO MAC interrupts */
	srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
	if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
		tsi721_pw_handler((struct rio_mport *)ptr);

	return IRQ_HANDLED;
}

/**
 * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (mport structure)
 *
 * Handles Tsi721 interrupts from SR2PC Channel.
 * NOTE: At this moment services only one SR2PC channel associated with inbound
 * doorbells.
 */
static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
{
	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
	u32 sr_ch_int;

	/* Service Inbound DB interrupt from SR2PC channel */
	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
		tsi721_dbell_handler((struct rio_mport *)ptr);

	/* Clear interrupts */
	iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
	/* Read back to ensure that interrupt was cleared */
	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));

	return IRQ_HANDLED;
}

/**
 * tsi721_request_msix - register interrupt service for MSI-X mode.
 * @mport: RapidIO master port structure
 *
 * Registers MSI-X interrupt service routines for interrupts that are active
 * immediately after mport initialization. Messaging interrupt service routines
 * should be registered during corresponding open requests.
 */
static int tsi721_request_msix(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	int err = 0;

	err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
			tsi721_sr2pc_ch_msix, 0,
			priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport);
	if (err)
		goto out;

	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
			tsi721_srio_msix, 0,
			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport);
	if (err)
		free_irq(
			priv->msix[TSI721_VECT_IDB].vector,
			(void *)mport);
out:
	return err;
}
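/*
 * The per-mailbox MSI-X handlers above recover the mailbox index from the
 * interrupt number itself: vectors for a given group (e.g. OMB0_DONE ...
 * OMB3_DONE) sit in consecutive priv->msix[] slots, so
 * (irq - base_vector) % RIO_MAX_MBOX yields the mailbox that fired. This
 * assumes the vectors within each group were allocated contiguously by
 * tsi721_enable_msix() below.
 */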
/**
 * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
 * @priv: pointer to tsi721 private data
 *
 * Configures MSI-X support for Tsi721. Supports only an exact number
 * of requested vectors.
 */
static int tsi721_enable_msix(struct tsi721_device *priv)
{
	struct msix_entry entries[TSI721_VECT_MAX];
	int err;
	int i;

	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;

	/*
	 * Initialize MSI-X entries for Messaging Engine:
	 * this driver supports four RIO mailboxes (inbound and outbound)
	 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
	 * offset +4 is added to IB MBOX number.
	 */
	for (i = 0; i < RIO_MAX_MBOX; i++) {
		entries[TSI721_VECT_IMB0_RCV + i].entry =
					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
		entries[TSI721_VECT_IMB0_INT + i].entry =
					TSI721_MSIX_IMSG_INT(i + 4);
		entries[TSI721_VECT_OMB0_DONE + i].entry =
					TSI721_MSIX_OMSG_DONE(i);
		entries[TSI721_VECT_OMB0_INT + i].entry =
					TSI721_MSIX_OMSG_INT(i);
	}

	err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			dev_info(&priv->pdev->dev,
				 "Only %d MSI-X vectors available, "
				 "not using MSI-X\n", err);
		return err;
	}

	/*
	 * Copy MSI-X vector information into tsi721 private structure
	 */
	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));

	for (i = 0; i < RIO_MAX_MBOX; i++) {
		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
				entries[TSI721_VECT_IMB0_RCV + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
				entries[TSI721_VECT_IMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
				entries[TSI721_VECT_OMB0_DONE + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
				entries[TSI721_VECT_OMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
			 i, pci_name(priv->pdev));
	}

	return 0;
}
#endif /* CONFIG_PCI_MSI */

static int tsi721_request_irq(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	int err;

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX)
		err = tsi721_request_msix(mport);
	else
#endif
		err = request_irq(priv->pdev->irq, tsi721_irqhandler,
			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
			  DRV_NAME, (void *)mport);

	if (err)
		dev_err(&priv->pdev->dev,
			"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
/**
 * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
 * translation regions.
 * @priv: pointer to tsi721 private data
 *
 * Disables SREP translation regions.
 */
static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
{
	int i;

	/* Disable all PC2SR translation windows */
	for (i = 0; i < TSI721_OBWIN_NUM; i++)
		iowrite32(0, priv->regs + TSI721_OBWINLB(i));
}

/**
 * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
 * translation regions.
 * @priv: pointer to tsi721 private data
 *
 * Disables inbound windows.
 */
static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
{
	int i;

	/* Disable all SR2PC inbound windows */
	for (i = 0; i < TSI721_IBWIN_NUM; i++)
		iowrite32(0, priv->regs + TSI721_IBWINLB(i));
}

/**
 * tsi721_port_write_init - Inbound port write interface init
 * @priv: pointer to tsi721 private data
 *
 * Initializes inbound port write handler.
 * Returns %0 on success or %-ENOMEM on failure.
 */
static int tsi721_port_write_init(struct tsi721_device *priv)
{
	priv->pw_discard_count = 0;
	INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
	spin_lock_init(&priv->pw_fifo_lock);
	if (kfifo_alloc(&priv->pw_fifo,
			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n");
		return -ENOMEM;
	}

	/* Use reliable port-write capture mode */
	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
	return 0;
}

static int tsi721_doorbell_init(struct tsi721_device *priv)
{
	/* Outbound Doorbells do not require any setup.
	 * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
	 * That BAR1 was mapped during the probe routine.
	 */

	/* Initialize Inbound Doorbell processing DPC and queue */
	priv->db_discard_count = 0;
	INIT_WORK(&priv->idb_work, tsi721_db_dpc);

	/* Allocate buffer for inbound doorbells queue */
	priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
				IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
				&priv->idb_dma, GFP_KERNEL);
	if (!priv->idb_base)
		return -ENOMEM;

	dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
		priv->idb_base, (unsigned long long)priv->idb_dma);

	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
	iowrite32(((u64)priv->idb_dma >> 32),
		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
	/* Enable accepting all inbound doorbells */
	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));

	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));

	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	return 0;
}

static void tsi721_doorbell_free(struct tsi721_device *priv)
{
	if (priv->idb_base == NULL)
		return;

	/* Free buffer allocated for inbound doorbell queue */
	dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
			  priv->idb_base, priv->idb_dma);
	priv->idb_base = NULL;
}
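/*
 * The inbound doorbell queue lives in a single DMA-coherent allocation;
 * its 64-bit bus address is split across the BASEU/BASEL register pair,
 * with the low word masked by TSI721_IDQ_BASEL_ADDR. The same
 * high/low register split is used below for the BDMA descriptor ring and
 * status FIFO base pointers.
 */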
static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
{
	struct tsi721_dma_desc *bd_ptr;
	u64		*sts_ptr;
	dma_addr_t	bd_phys, sts_phys;
	int		sts_size;
	int		bd_num = priv->bdma[chnum].bd_num;

	dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);

	/*
	 * Initialize DMA channel for maintenance requests
	 */

	/* Allocate space for DMA descriptors */
	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
				bd_num * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	priv->bdma[chnum].bd_phys = bd_phys;
	priv->bdma[chnum].bd_base = bd_ptr;

	dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
					bd_num : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(&priv->pdev->dev,
				  bd_num * sizeof(struct tsi721_dma_desc),
				  bd_ptr, bd_phys);
		priv->bdma[chnum].bd_base = NULL;
		return -ENOMEM;
	}

	priv->bdma[chnum].sts_phys = sts_phys;
	priv->bdma[chnum].sts_base = sts_ptr;
	priv->bdma[chnum].sts_size = sts_size;

	dev_dbg(&priv->pdev->dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring */
	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		priv->regs + TSI721_DMAC_DPTRH(chnum));
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		priv->regs + TSI721_DMAC_DPTRL(chnum));

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		priv->regs + TSI721_DMAC_DSBH(chnum));
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		priv->regs + TSI721_DMAC_DSBL(chnum));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		priv->regs + TSI721_DMAC_DSSZ(chnum));

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		priv->regs + TSI721_DMAC_INT(chnum));

	ioread32(priv->regs + TSI721_DMAC_INT(chnum));

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
	ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
{
	u32 ch_stat;

	if (priv->bdma[chnum].bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT,
		priv->regs + TSI721_DMAC_CTL(chnum));

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(&priv->pdev->dev,
		priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
		priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
	priv->bdma[chnum].bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(&priv->pdev->dev,
		priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
		priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
	priv->bdma[chnum].sts_base = NULL;
	return 0;
}
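/*
 * The last descriptor in each BDMA ring is a DTYPE3 "link" descriptor
 * whose next_lo/next_hi pointer aims back at the ring base, turning the
 * linear allocation into a circular list that the channel can follow
 * indefinitely without software repositioning the descriptor pointer.
 */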
static int tsi721_bdma_init(struct tsi721_device *priv)
{
	/* Initialize BDMA channel allocated for RapidIO maintenance read/write
	 * request generation
	 */
	priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
	if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
		dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
			" channel %d, aborting\n", TSI721_DMACH_MAINT);
		return -ENOMEM;
	}

	return 0;
}

static void tsi721_bdma_free(struct tsi721_device *priv)
{
	tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
}
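/*
 * The four helpers below follow one pattern for both messaging directions:
 * clear any pending per-channel interrupt bits, update the per-channel
 * enable mask, and then, only when running in MSI or INTA mode, propagate
 * the change to the device-level channel enable register
 * (TSI721_DEV_CHAN_INTE). In MSI-X mode each channel already has dedicated
 * vectors, so the device-level step is skipped.
 */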
/* Enable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
				  u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Inbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));

	/* Enable Inbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
	iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to enable next levels
	 */

	/* Enable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
		  priv->regs + TSI721_DEV_CHAN_INTE);
}

/* Disable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
				   u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Inbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));

	/* Disable Inbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
	rval &= ~inte_mask;
	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to disable next levels
	 */

	/* Disable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	rval &= ~TSI721_INT_IMSG_CHAN(ch);
	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
}

/* Enable Outbound Messaging interrupts */
static void
tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
				  u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Outbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));

	/* Enable Outbound Messaging channel interrupts */
	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
	iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to enable next levels
	 */

	/* Enable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
		  priv->regs + TSI721_DEV_CHAN_INTE);
}

/* Disable Outbound Messaging interrupts */
static void
tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
				   u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Outbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));

	/* Disable Outbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
	rval &= ~inte_mask;
	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to disable next levels
	 */

	/* Disable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	rval &= ~TSI721_INT_OMSG_CHAN(ch);
	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
}

/**
 * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 */
static int
tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_omsg_desc *desc;
	u32 tx_slot;

	if (!priv->omsg_init[mbox] ||
	    len > TSI721_MSG_MAX_SIZE || len < 8)
		return -EINVAL;

	tx_slot = priv->omsg_ring[mbox].tx_slot;

	/* Copy message into transfer buffer */
	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);

	if (len & 0x7)
		len += 8;

	/* Build descriptor associated with buffer */
	desc = priv->omsg_ring[mbox].omd_base;
	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
	if (tx_slot % 4 == 0)
		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);

	desc[tx_slot].msg_info =
		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
			    (0xe << 12) | (len & 0xff8));
	desc[tx_slot].bufptr_lo =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
			    0xffffffff);
	desc[tx_slot].bufptr_hi =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);

	priv->omsg_ring[mbox].wr_count++;

	/* Go to next descriptor */
	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
		priv->omsg_ring[mbox].tx_slot = 0;
		/* Move through the ring link descriptor at the end */
		priv->omsg_ring[mbox].wr_count++;
	}

	mb();

	/* Set new write count value */
	iowrite32(priv->omsg_ring[mbox].wr_count,
		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));

	return 0;
}
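/*
 * Two details in tsi721_add_outb_message() are easy to miss: the payload
 * length is rounded up to the next 8-byte multiple before being encoded
 * into msg_info (RapidIO messages are built from double-word units), and
 * when tx_slot wraps, wr_count is incremented a second time so that the
 * hardware write counter also steps over the DTYPE5 link descriptor that
 * closes the ring.
 */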
/**
 * tsi721_omsg_handler - Outbound Message Interrupt Handler
 * @priv: pointer to tsi721 private data
 * @ch: number of OB MSG channel to service
 *
 * Services channel interrupts from outbound messaging engine.
 */
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
{
	u32 omsg_int;

	spin_lock(&priv->omsg_ring[ch].lock);

	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));

	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
		dev_info(&priv->pdev->dev,
			"OB MBOX%d: Status FIFO is full\n", ch);

	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
		u32 srd_ptr;
		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
		int i, j;
		u32 tx_slot;

		/*
		 * Find last successfully processed descriptor
		 */

		/* Check and clear descriptor status FIFO entries */
		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
		sts_ptr = priv->omsg_ring[ch].sts_base;
		j = srd_ptr * 8;
		while (sts_ptr[j]) {
			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
				prev_ptr = last_ptr;
				last_ptr = le64_to_cpu(sts_ptr[j]);
				sts_ptr[j] = 0;
			}

			++srd_ptr;
			srd_ptr %= priv->omsg_ring[ch].sts_size;
			j = srd_ptr * 8;
		}

		if (last_ptr == 0)
			goto no_sts_update;

		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));

		if (!priv->mport->outb_msg[ch].mcback)
			goto no_sts_update;

		/* Inform upper layer about transfer completion */

		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
						sizeof(struct tsi721_omsg_desc);

		/*
		 * Check if this is a Link Descriptor (LD).
		 * If yes, ignore LD and use descriptor processed
		 * before LD.
		 */
		if (tx_slot == priv->omsg_ring[ch].size) {
			if (prev_ptr)
				tx_slot = (prev_ptr -
					(u64)priv->omsg_ring[ch].omd_phys)/
						sizeof(struct tsi721_omsg_desc);
			else
				goto no_sts_update;
		}

		/* Move slot index to the next message to be sent */
		++tx_slot;
		if (tx_slot == priv->omsg_ring[ch].size)
			tx_slot = 0;
		BUG_ON(tx_slot >= priv->omsg_ring[ch].size);
		priv->mport->outb_msg[ch].mcback(priv->mport,
				priv->omsg_ring[ch].dev_id, ch,
				tx_slot);
	}

no_sts_update:

	if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
		/*
		 * Outbound message operation aborted due to error,
		 * reinitialize OB MSG channel
		 */

		dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n",
			ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));

		iowrite32(TSI721_OBDMAC_INT_ERROR,
			priv->regs + TSI721_OBDMAC_INT(ch));
		iowrite32(TSI721_OBDMAC_CTL_INIT,
			priv->regs + TSI721_OBDMAC_CTL(ch));
		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));

		/* Inform upper level to clear all pending tx slots */
		if (priv->mport->outb_msg[ch].mcback)
			priv->mport->outb_msg[ch].mcback(priv->mport,
					priv->omsg_ring[ch].dev_id, ch,
					priv->omsg_ring[ch].tx_slot);
		/* Synch tx_slot tracking */
		iowrite32(priv->omsg_ring[ch].tx_slot,
			priv->regs + TSI721_OBDMAC_DRDCNT(ch));
		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
		priv->omsg_ring[ch].sts_rdptr = 0;
	}

	/* Clear channel interrupts */
	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));

	if (!(priv->flags & TSI721_USING_MSIX)) {
		u32 ch_inte;

		/* Re-enable channel interrupts */
		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
		ch_inte |= TSI721_INT_OMSG_CHAN(ch);
		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
	}

	spin_unlock(&priv->omsg_ring[ch].lock);
}
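/*
 * The outbound descriptor status FIFO is consumed in blocks of eight
 * 64-bit entries, each holding the bus address of a completed descriptor.
 * tsi721_omsg_handler() scans and zeroes entries until it hits an empty
 * slot; the address found last is translated back into a tx_slot index by
 * subtracting the descriptor ring base (omd_phys) and dividing by the
 * descriptor size.
 */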
/**
 * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
 * @mport: Master port implementing Outbound Messaging Engine
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 */
static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
				 int mbox, int entries)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_omsg_desc *bd_ptr;
	int i, rc = 0;

	if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
	    (entries > (TSI721_OMSGD_RING_SIZE)) ||
	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
		rc = -EINVAL;
		goto out;
	}

	priv->omsg_ring[mbox].dev_id = dev_id;
	priv->omsg_ring[mbox].size = entries;
	priv->omsg_ring[mbox].sts_rdptr = 0;
	spin_lock_init(&priv->omsg_ring[mbox].lock);

	/* Allocate outbound message buffers, one per descriptor ring entry */
	for (i = 0; i < entries; i++) {
		priv->omsg_ring[mbox].omq_base[i] =
			dma_alloc_coherent(
				&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
				&priv->omsg_ring[mbox].omq_phys[i],
				GFP_KERNEL);
		if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate OB MSG data buffer for"
				" MBOX%d\n", mbox);
			rc = -ENOMEM;
			goto out_buf;
		}
	}

	/* Outbound message descriptor allocation */
	priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
				&priv->pdev->dev,
				(entries + 1) * sizeof(struct tsi721_omsg_desc),
				&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
	if (priv->omsg_ring[mbox].omd_base == NULL) {
		dev_dbg(&priv->pdev->dev,
			"Unable to allocate OB MSG descriptor memory "
			"for MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	priv->omsg_ring[mbox].tx_slot = 0;

	/* Outbound message descriptor status FIFO allocation */
	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
	priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
			priv->omsg_ring[mbox].sts_size *
						sizeof(struct tsi721_dma_sts),
			&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
	if (priv->omsg_ring[mbox].sts_base == NULL) {
		dev_dbg(&priv->pdev->dev,
			"Unable to allocate OB MSG descriptor status FIFO "
			"for MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_desc;
	}

	/*
	 * Configure Outbound Messaging Engine
	 */

	/* Setup Outbound Message descriptor pointer */
	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
			priv->regs + TSI721_OBDMAC_DPTRH(mbox));
	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
					TSI721_OBDMAC_DPTRL_MASK),
			priv->regs + TSI721_OBDMAC_DPTRL(mbox));

	/* Setup Outbound Message descriptor status FIFO */
	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
			priv->regs + TSI721_OBDMAC_DSBH(mbox));
	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
					TSI721_OBDMAC_DSBL_MASK),
			priv->regs + TSI721_OBDMAC_DSBL(mbox));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
		priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(
			priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
			tsi721_omsg_msix, 0,
			priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"OBOX%d-DONE\n", mbox);
			goto out_stat;
		}

		rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
			tsi721_omsg_msix, 0,
			priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"MBOX%d-INT\n", mbox);
			free_irq(
				priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
				(void *)mport);
			goto out_stat;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);

	/* Initialize Outbound Message descriptors ring */
	bd_ptr = priv->omsg_ring[mbox].omd_base;
	bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
	bd_ptr[entries].msg_info = 0;
	bd_ptr[entries].next_lo =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
			    TSI721_OBDMAC_DPTRL_MASK);
	bd_ptr[entries].next_hi =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
	priv->omsg_ring[mbox].wr_count = 0;
	mb();

	/* Initialize Outbound Message engine */
	iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox));
	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
	udelay(10);

	priv->omsg_init[mbox] = 1;

	return 0;

#ifdef CONFIG_PCI_MSI
out_stat:
	dma_free_coherent(&priv->pdev->dev,
		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
		priv->omsg_ring[mbox].sts_base,
		priv->omsg_ring[mbox].sts_phys);

	priv->omsg_ring[mbox].sts_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_desc:
	dma_free_coherent(&priv->pdev->dev,
		(entries + 1) * sizeof(struct tsi721_omsg_desc),
		priv->omsg_ring[mbox].omd_base,
		priv->omsg_ring[mbox].omd_phys);

	priv->omsg_ring[mbox].omd_base = NULL;

out_buf:
	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
		if (priv->omsg_ring[mbox].omq_base[i]) {
			dma_free_coherent(&priv->pdev->dev,
				TSI721_MSG_BUFFER_SIZE,
				priv->omsg_ring[mbox].omq_base[i],
				priv->omsg_ring[mbox].omq_phys[i]);

			priv->omsg_ring[mbox].omq_base[i] = NULL;
		}
	}

out:
	return rc;
}
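/*
 * Note the ring sizing above: (entries + 1) descriptors are allocated
 * because slot [entries] is reserved for the DTYPE5 link descriptor that
 * points the engine back to the first descriptor, and the status FIFO is
 * sized up to the next power of two so the hardware pointer arithmetic
 * wraps cleanly.
 */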
/**
 * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 */
static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 i;

	if (!priv->omsg_init[mbox])
		return;
	priv->omsg_init[mbox] = 0;

	/* Disable Interrupts */

	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
			 (void *)mport);
		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
			 (void *)mport);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free OMSG Descriptor Status FIFO */
	dma_free_coherent(&priv->pdev->dev,
		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
		priv->omsg_ring[mbox].sts_base,
		priv->omsg_ring[mbox].sts_phys);

	priv->omsg_ring[mbox].sts_base = NULL;

	/* Free OMSG descriptors */
	dma_free_coherent(&priv->pdev->dev,
		(priv->omsg_ring[mbox].size + 1) *
			sizeof(struct tsi721_omsg_desc),
		priv->omsg_ring[mbox].omd_base,
		priv->omsg_ring[mbox].omd_phys);

	priv->omsg_ring[mbox].omd_base = NULL;

	/* Free message buffers */
	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
		if (priv->omsg_ring[mbox].omq_base[i]) {
			dma_free_coherent(&priv->pdev->dev,
					  TSI721_MSG_BUFFER_SIZE,
					  priv->omsg_ring[mbox].omq_base[i],
					  priv->omsg_ring[mbox].omq_phys[i]);

			priv->omsg_ring[mbox].omq_base[i] = NULL;
		}
	}
}

/**
 * tsi721_imsg_handler - Inbound Message Interrupt Handler
 * @priv: pointer to tsi721 private data
 * @ch: inbound message channel number to service
 *
 * Services channel interrupts from inbound messaging engine.
 */
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
{
	u32 mbox = ch - 4;
	u32 imsg_int;

	spin_lock(&priv->imsg_ring[mbox].lock);

	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));

	if (imsg_int & TSI721_IBDMAC_INT_SRTO)
		dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n",
			mbox);

	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
		dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n",
			mbox);

	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
		dev_info(&priv->pdev->dev,
			"IB MBOX%d IB free queue low\n", mbox);

	/* Clear IB channel interrupts */
	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));

	/* If an IB Msg is received notify the upper layer */
	if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
		priv->mport->inb_msg[mbox].mcback)
		priv->mport->inb_msg[mbox].mcback(priv->mport,
				priv->imsg_ring[mbox].dev_id, mbox, -1);

	if (!(priv->flags & TSI721_USING_MSIX)) {
		u32 ch_inte;

		/* Re-enable channel interrupts */
		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
		ch_inte |= TSI721_INT_IMSG_CHAN(ch);
		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
	}

	spin_unlock(&priv->imsg_ring[mbox].lock);
}

/**
 * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 */
static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
				int mbox, int entries)
{
	struct tsi721_device *priv = mport->priv;
	int ch = mbox + 4;
	int i;
	u64 *free_ptr;
	int rc = 0;

	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
	    (entries > TSI721_IMSGD_RING_SIZE) ||
	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize IB Messaging Ring */
	priv->imsg_ring[mbox].dev_id = dev_id;
	priv->imsg_ring[mbox].size = entries;
	priv->imsg_ring[mbox].rx_slot = 0;
	priv->imsg_ring[mbox].desc_rdptr = 0;
	priv->imsg_ring[mbox].fq_wrptr = 0;
	for (i = 0; i < priv->imsg_ring[mbox].size; i++)
		priv->imsg_ring[mbox].imq_base[i] = NULL;
	spin_lock_init(&priv->imsg_ring[mbox].lock);

	/* Allocate buffers for incoming messages */
	priv->imsg_ring[mbox].buf_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * TSI721_MSG_BUFFER_SIZE,
				   &priv->imsg_ring[mbox].buf_phys,
				   GFP_KERNEL);
	if (priv->imsg_ring[mbox].buf_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate buffers for IB MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out;
	}

	/* Allocate memory for circular free list */
	priv->imsg_ring[mbox].imfq_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * 8,
				   &priv->imsg_ring[mbox].imfq_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].imfq_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate free queue for IB MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	/* Allocate memory for Inbound message descriptors */
	priv->imsg_ring[mbox].imd_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * sizeof(struct tsi721_imsg_desc),
				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);

	if (priv->imsg_ring[mbox].imd_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate descriptor memory for IB MBOX%d\n",
			mbox);
		rc = -ENOMEM;
		goto out_dma;
	}

	/* Fill free buffer pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	for (i = 0; i < entries; i++)
		free_ptr[i] = cpu_to_le64(
				(u64)(priv->imsg_ring[mbox].buf_phys) +
				i * 0x1000);

	mb();

	/*
	 * For mapping of inbound SRIO Messages into appropriate queues we need
	 * to set Inbound Device ID register in the messaging engine. We do it
	 * once when first inbound mailbox is requested.
	 */
	if (!(priv->flags & TSI721_IMSGID_SET)) {
		iowrite32((u32)priv->mport->host_deviceid,
			priv->regs + TSI721_IB_DEVID);
		priv->flags |= TSI721_IMSGID_SET;
	}

	/*
	 * Configure Inbound Messaging channel (ch = mbox + 4)
	 */

	/* Setup Inbound Message free queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
		priv->regs + TSI721_IBDMAC_FQBH(ch));
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
			TSI721_IBDMAC_FQBL_MASK),
		priv->regs + TSI721_IBDMAC_FQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_FQSZ(ch));

	/* Setup Inbound Message descriptor queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
		priv->regs + TSI721_IBDMAC_DQBH(ch));
	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
		   (u32)TSI721_IBDMAC_DQBL_MASK),
		priv->regs + TSI721_IBDMAC_DQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_DQSZ(ch));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
			tsi721_imsg_msix, 0,
			priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"IBOX%d-DONE\n", mbox);
			goto out_desc;
		}

		rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
			tsi721_imsg_msix, 0,
			priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"IBOX%d-INT\n", mbox);
			free_irq(
				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)mport);
			goto out_desc;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);

	/* Initialize Inbound Message Engine */
	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
	udelay(10);
	priv->imsg_ring[mbox].fq_wrptr = entries - 1;
	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));

	priv->imsg_init[mbox] = 1;
	return 0;

#ifdef CONFIG_PCI_MSI
out_desc:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_dma:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

out_buf:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

out:
	return rc;
}

/**
 * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Mailbox to close
 */
static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int ch = mbox + 4;

	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
		return;
	priv->imsg_init[mbox] = 0;

	/* Disable Inbound Messaging Engine */

	/* Disable Interrupts */
	tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_MASK);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)mport);
		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
				(void *)mport);
	}
#endif /* CONFIG_PCI_MSI */

	/* Clear Inbound Buffer Queue */
	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	/* Free memory allocated for message buffers */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

	/* Free memory allocated for free pointer list */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

	/* Free memory allocated for RX descriptors */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
}
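/*
 * Inbound message reception splits buffer management between the driver
 * and the consumer: the upper layer posts receive buffers with
 * tsi721_add_inb_buffer() and claims completed messages with
 * tsi721_get_inb_message(); the driver copies each payload out of the
 * hardware-side buffer and recycles that buffer back onto the free queue.
 */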
/**
 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Returns %0 on success or %-EINVAL if the next ring slot is still occupied.
 */
static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int rc = 0;

	rx_slot = priv->imsg_ring[mbox].rx_slot;
	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
		dev_err(&priv->pdev->dev,
			"Error adding inbound buffer %d, buffer exists\n",
			rx_slot);
		rc = -EINVAL;
		goto out;
	}

	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;

	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].rx_slot = 0;

out:
	return rc;
}

/**
 * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 *
 * Returns pointer to the message on success or %NULL on failure.
 */
static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_imsg_desc *desc;
	u32 rx_slot;
	void *rx_virt = NULL;
	u64 rx_phys;
	void *buf = NULL;
	u64 *free_ptr;
	int ch = mbox + 4;
	int msg_size;

	if (!priv->imsg_init[mbox])
		return NULL;

	desc = priv->imsg_ring[mbox].imd_base;
	desc += priv->imsg_ring[mbox].desc_rdptr;

	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
		goto out;

	/* Find the next slot with a posted RX buffer */
	rx_slot = priv->imsg_ring[mbox].rx_slot;
	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
		if (++rx_slot == priv->imsg_ring[mbox].size)
			rx_slot = 0;
	}

	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
			le32_to_cpu(desc->bufptr_lo);

	rx_virt = priv->imsg_ring[mbox].buf_base +
		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);

	buf = priv->imsg_ring[mbox].imq_base[rx_slot];
	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
	if (msg_size == 0)
		msg_size = RIO_MAX_MSG_SIZE;

	memcpy(buf, rx_virt, msg_size);
	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].desc_rdptr = 0;

	iowrite32(priv->imsg_ring[mbox].desc_rdptr,
		priv->regs + TSI721_IBDMAC_DQRP(ch));

	/* Return free buffer into the pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);

	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].fq_wrptr = 0;

	iowrite32(priv->imsg_ring[mbox].fq_wrptr,
		priv->regs + TSI721_IBDMAC_FQWP(ch));
out:
	return buf;
}
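/*
 * Example RX flow (an illustrative sketch, not part of this driver):
 * the consumer's inbound callback typically drains the mailbox and
 * returns every buffer to the ring; process_msg() is a hypothetical
 * helper.
 *
 *	void *buf;
 *
 *	while ((buf = rio_get_inb_message(mport, mbox)) != NULL) {
 *		process_msg(buf, RIO_MAX_MSG_SIZE);
 *		rio_add_inb_buffer(mport, mbox, buf);
 *	}
 *
 * Note that tsi721_get_inb_message() copies the payload out of the
 * DMA-coherent ring into the consumer's buffer, so the buffer may be
 * re-posted immediately.
 */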
/**
 * tsi721_messages_init - Initialization of Messaging Engine
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 messaging engine. Returns %0.
 */
static int tsi721_messages_init(struct tsi721_device *priv)
{
	int ch;

	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);

	/* Set SRIO Message Request/Response Timeout */
	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);

	/* Initialize Inbound Messaging Engine Registers */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
		/* Clear interrupt bits */
		iowrite32(TSI721_IBDMAC_INT_MASK,
			priv->regs + TSI721_IBDMAC_INT(ch));
		/* Clear Status */
		iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));

		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
			priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
		iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
			priv->regs + TSI721_SMSG_ECC_NCOR(ch));
	}

	return 0;
}

/**
 * tsi721_disable_ints - disables all device interrupts
 * @priv: pointer to tsi721 private data
 */
static void tsi721_disable_ints(struct tsi721_device *priv)
{
	int ch;

	/* Disable all device level interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_INTE);

	/* Disable all Device Channel interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);

	/* Disable all Inbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));

	/* Disable all Outbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));

	/* Disable all general messaging interrupts */
	iowrite32(0, priv->regs + TSI721_SMSG_INTE);

	/* Disable all BDMA Channel interrupts */
	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
		iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));

	/* Disable all general BDMA interrupts */
	iowrite32(0, priv->regs + TSI721_BDMA_INTE);

	/* Disable all SRIO Channel interrupts */
	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));

	/* Disable all general SR2PC interrupts */
	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);

	/* Disable all PC2SR interrupts */
	iowrite32(0, priv->regs + TSI721_PC2SR_INTE);

	/* Disable all I2C interrupts */
	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);

	/* Disable SRIO MAC interrupts */
	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
}
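/*
 * tsi721_disable_ints() is called from tsi721_probe() before any
 * interrupt handlers are registered, so the device cannot raise
 * interrupts while the rest of the initialization is in progress.
 */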
/**
 * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 as RapidIO master port. Returns %0 on success or
 * %-ENOMEM on failure.
 */
static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
{
	struct pci_dev *pdev = priv->pdev;
	int err = 0;
	struct rio_ops *ops;
	struct rio_mport *mport;

	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
	if (!ops) {
		dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n");
		return -ENOMEM;
	}

	ops->lcread = tsi721_lcread;
	ops->lcwrite = tsi721_lcwrite;
	ops->cread = tsi721_cread_dma;
	ops->cwrite = tsi721_cwrite_dma;
	ops->dsend = tsi721_dsend;
	ops->open_inb_mbox = tsi721_open_inb_mbox;
	ops->close_inb_mbox = tsi721_close_inb_mbox;
	ops->open_outb_mbox = tsi721_open_outb_mbox;
	ops->close_outb_mbox = tsi721_close_outb_mbox;
	ops->add_outb_message = tsi721_add_outb_message;
	ops->add_inb_buffer = tsi721_add_inb_buffer;
	ops->get_inb_message = tsi721_get_inb_message;

	mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
	if (!mport) {
		kfree(ops);
		dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n");
		return -ENOMEM;
	}

	mport->ops = ops;
	mport->index = 0;
	mport->sys_size = 0; /* small system */
	mport->phy_type = RIO_PHY_SERIAL;
	mport->priv = (void *)priv;
	mport->phys_efptr = 0x100;

	INIT_LIST_HEAD(&mport->dbells);

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
	strcpy(mport->name, "Tsi721 mport");

	/* Hook up interrupt handler */

#ifdef CONFIG_PCI_MSI
	if (!tsi721_enable_msix(priv))
		priv->flags |= TSI721_USING_MSIX;
	else if (!pci_enable_msi(pdev))
		priv->flags |= TSI721_USING_MSI;
	else
		dev_info(&pdev->dev,
			 "MSI/MSI-X is not available. Using legacy INTx.\n");
#endif /* CONFIG_PCI_MSI */

	err = tsi721_request_irq(mport);

	if (!err) {
		tsi721_interrupts_init(priv);
		ops->pwenable = tsi721_pw_enable;
	} else
		dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
			"vector %02X err=0x%x\n", pdev->irq, err);

	/* Enable SRIO link */
	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
		  TSI721_DEVCTL_SRBOOT_CMPL,
		  priv->regs + TSI721_DEVCTL);

	rio_register_mport(mport);
	priv->mport = mport;

	if (mport->host_deviceid >= 0)
		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
			  RIO_PORT_GEN_DISCOVERED,
			  priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
	else
		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));

	return 0;
}
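/*
 * NOTE: rio_register_mport() only adds this port to the RapidIO
 * subsystem's list of master ports; enumeration/discovery of the
 * attached fabric is triggered separately by the core (see
 * rio_init_mports()).
 */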
static int __devinit tsi721_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	struct tsi721_device *priv;
	int i, cap;
	int err;
	u32 regval;

	priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
		err = -ENOMEM;
		goto err_exit;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		goto err_clean;
	}

	priv->pdev = pdev;

#ifdef DEBUG
	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
		dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
			i, (unsigned long long)pci_resource_start(pdev, i),
			(unsigned long)pci_resource_len(pdev, i),
			pci_resource_flags(pdev, i));
	}
#endif
	/*
	 * Verify BAR configuration
	 */

	/* BAR_0 (registers) must be 512KB+ in 32-bit address space */
	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
	    pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
	    pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
		dev_err(&pdev->dev,
			"Missing or misconfigured CSR BAR0, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	/* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
	if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
	    pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
	    pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
		dev_err(&pdev->dev,
			"Missing or misconfigured Doorbell BAR1, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
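	/*
	 * Summary of the BAR layout implied by the checks in this
	 * function: BAR0 - CSR space, at least 512KB, 32-bit MMIO;
	 * BAR1 - outbound doorbell window, at least 16MB, 32-bit MMIO;
	 * BAR2/BAR4 - optional 64-bit outbound translation windows,
	 * unused by this driver version (checked below only to report
	 * their state).
	 */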
	/*
	 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe
	 * address space.
	 * NOTE: BAR_2 and BAR_4 are not used by this version of driver.
	 * It may be a good idea to keep them disabled using HW configuration
	 * to save PCI memory space.
	 */
	if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) &&
	    (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) {
		dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n");
	}

	if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) &&
	    (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) {
		dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n");
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	priv->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!priv->regs) {
		dev_err(&pdev->dev,
			"Unable to map device registers space, aborting\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
	if (!priv->odb_base) {
		dev_err(&pdev->dev,
			"Unable to map outbound doorbells space, aborting\n");
		err = -ENOMEM;
		goto err_unmap_bars;
	}

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
			dev_info(&pdev->dev, "Unable to set DMA mask\n");
			err = -EIO;
			goto err_unmap_bars;
		}

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err)
			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
	}

	cap = pci_pcie_cap(pdev);
	BUG_ON(cap == 0);

	/* Clear "no snoop" and "relaxed ordering" bits, set MRRS to 512B. */
	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
	regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		    PCI_EXP_DEVCTL_NOSNOOP_EN);
	regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
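	/*
	 * NOTE: the PCI_EXP_DEVCTL_READRQ field was cleared above, so
	 * the 0x2 (010b) written back selects a 512-byte maximum read
	 * request size, which is also the PCIe reset default for this
	 * field.
	 */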
	/* Adjust PCIe completion timeout. */
	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
	regval &= ~(0x0f);
	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);

	/*
	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
	 */
	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
			       TSI721_MSIXTBL_OFFSET);
	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
			       TSI721_MSIXPBA_OFFSET);
	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
	/* End of FIXUP */

	tsi721_disable_ints(priv);

	tsi721_init_pc2sr_mapping(priv);
	tsi721_init_sr2pc_mapping(priv);

	if (tsi721_bdma_init(priv)) {
		dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
		err = -ENOMEM;
		goto err_unmap_bars;
	}

	err = tsi721_doorbell_init(priv);
	if (err)
		goto err_free_bdma;

	tsi721_port_write_init(priv);

	err = tsi721_messages_init(priv);
	if (err)
		goto err_free_consistent;

	err = tsi721_setup_mport(priv);
	if (err)
		goto err_free_consistent;

	return 0;

err_free_consistent:
	tsi721_doorbell_free(priv);
err_free_bdma:
	tsi721_bdma_free(priv);
err_unmap_bars:
	if (priv->regs)
		iounmap(priv->regs);
	if (priv->odb_base)
		iounmap(priv->odb_base);
err_free_res:
	pci_release_regions(pdev);
	pci_clear_master(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_clean:
	kfree(priv);
err_exit:
	return err;
}

static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);

static struct pci_driver tsi721_driver = {
	.name		= "tsi721",
	.id_table	= tsi721_pci_tbl,
	.probe		= tsi721_probe,
};

static int __init tsi721_init(void)
{
	return pci_register_driver(&tsi721_driver);
}

static void __exit tsi721_exit(void)
{
	pci_unregister_driver(&tsi721_driver);
}

device_initcall(tsi721_init);
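/*
 * NOTE: tsi721_driver has no .remove callback and no module_exit() is
 * declared, so the driver cannot be unloaded once registered and
 * tsi721_exit() above is never called.
 */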