/*
 * Freescale MPC85xx/MPC86xx RapidIO RMU support
 *
 * Copyright 2009 Sysgo AG
 * Thomas Moll <thomas.moll@sysgo.com>
 * - fixed maintenance access routines, check for aligned access
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
 * Liu Gang <Gang.Liu@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>

#include "fsl_rio.h"

#define GET_RMM_HANDLE(mport) \
		(((struct rio_priv *)(mport->priv))->rmm_handle)

/* RapidIO IRQ numbers, read from the device tree */
#define IRQ_RIO_PW(m)		(((struct fsl_rio_pw *)(m))->pwirq)
#define IRQ_RIO_BELL(m)		(((struct fsl_rio_dbell *)(m))->bellirq)
#define IRQ_RIO_TX(m)		(((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
#define IRQ_RIO_RX(m)		(((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)

#define RIO_MIN_TX_RING_SIZE	2
#define RIO_MAX_TX_RING_SIZE	2048
#define RIO_MIN_RX_RING_SIZE	2
#define RIO_MAX_RX_RING_SIZE	2048

#define RIO_IPWMR_SEN		0x00100000
#define RIO_IPWMR_QFIE		0x00000100
#define RIO_IPWMR_EIE		0x00000020
#define RIO_IPWMR_CQ		0x00000002
#define RIO_IPWMR_PWE		0x00000001

#define RIO_IPWSR_QF		0x00100000
#define RIO_IPWSR_TE		0x00000080
#define RIO_IPWSR_QFI		0x00000010
#define RIO_IPWSR_PWD		0x00000008
#define RIO_IPWSR_PWB		0x00000004

#define RIO_EPWISR		0x10010
/* EPWISR Error match value */
#define RIO_EPWISR_PINT1	0x80000000
#define RIO_EPWISR_PINT2	0x40000000
#define RIO_EPWISR_MU		0x00000002
#define RIO_EPWISR_PW		0x00000001

#define IPWSR_CLEAR		0x98
#define OMSR_CLEAR		0x1cb3
#define IMSR_CLEAR		0x491
#define IDSR_CLEAR		0x91
#define ODSR_CLEAR		0x1c00
#define LTLEECSR_ENABLE_ALL	0xFFC000FC
#define RIO_LTLEECSR		0x060c

#define RIO_IM0SR		0x64
#define RIO_IM1SR		0x164
#define RIO_OM0SR		0x4
#define RIO_OM1SR		0x104

#define RIO_DBELL_WIN_SIZE	0x1000

#define RIO_MSG_OMR_MUI		0x00000002
#define RIO_MSG_OSR_TE		0x00000080
#define RIO_MSG_OSR_QOI		0x00000020
#define RIO_MSG_OSR_QFI		0x00000010
#define RIO_MSG_OSR_MUB		0x00000004
#define RIO_MSG_OSR_EOMI	0x00000002
#define RIO_MSG_OSR_QEI		0x00000001

#define RIO_MSG_IMR_MI		0x00000002
#define RIO_MSG_ISR_TE		0x00000080
#define RIO_MSG_ISR_QFI		0x00000010
#define RIO_MSG_ISR_DIQI	0x00000001

#define RIO_MSG_DESC_SIZE	32
#define RIO_MSG_BUFFER_SIZE	4096

#define DOORBELL_DMR_DI		0x00000002
#define DOORBELL_DSR_TE		0x00000080
#define DOORBELL_DSR_QFI	0x00000010
#define DOORBELL_DSR_DIQI	0x00000001

#define DOORBELL_MESSAGE_SIZE	0x08

struct rio_msg_regs {
	u32 omr;
	u32 osr;
	u32 pad1;
	u32 odqdpar;
	u32 pad2;
	u32 osar;
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;
	u32 pad4[13];
	u32 imr;
	u32 isr;
	u32 pad5;
	u32 ifqdpar;
	u32 pad6;
	u32 ifqepar;
};

struct rio_dbell_regs {
	u32 odmr;
	u32 odsr;
	u32 pad1[4];
	u32 oddpr;
	u32 oddatr;
	u32 pad2[3];
	u32 odretcr;
	u32 pad3[12];
	u32 dmr;
	u32 dsr;
	u32 pad4;
	u32 dqdpar;
	u32 pad5;
	u32 dqepar;
};

struct rio_pw_regs {
	u32 pwmr;
	u32 pwsr;
	u32 epwqbar;
	u32 pwqbar;
};


struct rio_tx_desc {
	u32 pad1;
	u32 saddr;
	u32 dport;
	u32 dattr;
	u32 pad2;
	u32 pad3;
	u32 dwcnt;
	u32 pad4;
};

struct rio_msg_tx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;
	int size;
	void *dev_id;
};

struct rio_msg_rx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
	int rx_slot;
	int size;
	void *dev_id;
};

struct fsl_rmu {
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	int txirq;
	int rxirq;
};

struct rio_dbell_msg {
	u16 pad1;
	u16 tid;
	u16 sid;
	u16 info;
};

/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	osr = in_be32(&rmu->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
		u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
		int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;

		if (port->outb_msg[0].mcback != NULL) {
			port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
					-1,
					slot);
		}
		/* Ack the end-of-message interrupt */
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	isr = in_be32(&rmu->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * Can receive messages for any mailbox/letter to that
		 * mailbox destination. So, make the callback with an
		 * unknown/invalid mailbox number argument.
		 */
		if (port->inb_msg[0].mcback != NULL)
			port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
				-1,
				-1);

		/* Ack the queueing interrupt */
		out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
	int i;

	dsr = in_be32(&fsl_dbell->dbell_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		struct rio_dbell_msg *dmsg =
			fsl_dbell->dbell_ring.virt +
			(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug
			("RIO: processing doorbell,"
			 " sid %2.2x tid %2.2x info %4.4x\n",
			 dmsg->sid, dmsg->tid, dmsg->info);

		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (fsl_dbell->mport[i]) {
				list_for_each_entry(dbell,
					&fsl_dbell->mport[i]->dbells, node) {
					if ((dbell->res->start
						<= dmsg->info)
						&& (dbell->res->end
						>= dmsg->info)) {
						found = 1;
						break;
					}
				}
				if (found && dbell->dinb) {
					dbell->dinb(fsl_dbell->mport[i],
						dbell->dev_id, dmsg->sid,
						dmsg->tid,
						dmsg->info);
					break;
				}
			}
		}

		if (!found) {
			pr_debug
				("RIO: spurious doorbell,"
				 " sid %2.2x tid %2.2x info %4.4x\n",
				 dmsg->sid, dmsg->tid,
				 dmsg->info);
		}
		setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
	}

out:
	return IRQ_HANDLED;
}
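
/*
 * Usage sketch (not part of this driver): fsl_rio_dbell_handler() only
 * dispatches to handlers that a client driver has registered with the
 * RapidIO core.  For a hypothetical driver that wants to receive
 * doorbells whose info field falls in 0x0000-0x00ff, registration
 * would look roughly like:
 *
 *	static void my_dbell_inb(struct rio_mport *mport, void *dev_id,
 *				 u16 src, u16 dst, u16 info)
 *	{
 *		pr_info("doorbell 0x%04x from destid 0x%04x\n", info, src);
 *	}
 *
 *	err = rio_request_inb_dbell(mport, my_dev, 0x0000, 0x00ff,
 *				    my_dbell_inb);
 *
 * The core adds the requested range to mport->dbells, which is the
 * list scanned above to find a matching dinb callback.
 */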

void msg_unit_error_handler(void)
{

	/* XXX: Error recovery is not implemented, we just clear errors */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);

	out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);

	out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
	out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);

	out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
}

/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port write interrupts. Parses a list of registered
 * port write event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
	u32 ipwmr, ipwsr;
	struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
	u32 epwisr, tmp;

	epwisr = in_be32(rio_regs_win + RIO_EPWISR);
	if (!(epwisr & RIO_EPWISR_PW))
		goto pw_done;

	ipwmr = in_be32(&pw->pw_regs->pwmr);
	ipwsr = in_be32(&pw->pw_regs->pwsr);

#ifdef DEBUG_PW
	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
	if (ipwsr & RIO_IPWSR_QF)
		pr_debug(" QF");
	if (ipwsr & RIO_IPWSR_TE)
		pr_debug(" TE");
	if (ipwsr & RIO_IPWSR_QFI)
		pr_debug(" QFI");
	if (ipwsr & RIO_IPWSR_PWD)
		pr_debug(" PWD");
	if (ipwsr & RIO_IPWSR_PWB)
		pr_debug(" PWB");
	pr_debug(" )\n");
#endif
	/* Schedule deferred processing if PW was received */
	if (ipwsr & RIO_IPWSR_QFI) {
		/* Save PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
			pw->port_write_msg.msg_count++;
			kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
				 RIO_PW_MSG_SIZE);
		} else {
			pw->port_write_msg.discard_count++;
			pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
				 pw->port_write_msg.discard_count);
		}
		/* Clear interrupt and issue Clear Queue command. This allows
		 * another port-write to be received.
		 */
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
		out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);

		schedule_work(&pw->pw_work);
	}

	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
		pw->port_write_msg.err_count++;
		pr_debug("RIO: Port-Write Transaction Err (%d)\n",
			 pw->port_write_msg.err_count);
		/* Clear Transaction Error: port-write controller should be
		 * disabled when clearing this error
		 */
		out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
		out_be32(&pw->pw_regs->pwmr, ipwmr);
	}

	if (ipwsr & RIO_IPWSR_PWD) {
		pw->port_write_msg.discard_count++;
		pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
			 pw->port_write_msg.discard_count);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
	}

pw_done:
	if (epwisr & RIO_EPWISR_PINT1) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(0);
	}

	if (epwisr & RIO_EPWISR_PINT2) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(1);
	}

	if (epwisr & RIO_EPWISR_MU) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		msg_unit_error_handler();
	}

	return IRQ_HANDLED;
}

static void fsl_pw_dpc(struct work_struct *work)
{
	struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)msg_buffer,
			 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
		/* Process one message */
#ifdef DEBUG_PW
		{
			u32 i;

			pr_debug("%s : Port-Write Message:", __func__);
			for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
				if ((i%4) == 0)
					pr_debug("\n0x%02x: 0x%08x", i*4,
						 msg_buffer[i]);
				else
					pr_debug(" 0x%08x", msg_buffer[i]);
			}
			pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
	}
}
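
/*
 * Usage sketch (not part of this driver): fsl_pw_dpc() hands each
 * port-write message to the RapidIO core, which forwards it to the
 * callback that a switch or endpoint driver registered for the
 * reporting device.  Using the rio_request_inb_pwrite() prototype of
 * this kernel generation, a hypothetical registration could look like:
 *
 *	static int my_pwrite_cb(struct rio_dev *rdev,
 *				union rio_pw_msg *msg, int step)
 *	{
 *		pr_info("port-write from destid 0x%04x, comptag 0x%08x\n",
 *			rdev->destid, msg->em.comptag);
 *		return 0;
 *	}
 *
 *	err = rio_request_inb_pwrite(rdev, my_pwrite_cb);
 */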

/**
 * fsl_rio_pw_enable - enable/disable port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
{
	u32 rval;

	rval = in_be32(&pw->pw_regs->pwmr);

	if (enable)
		rval |= RIO_IPWMR_PWE;
	else
		rval &= ~RIO_IPWMR_PWE;

	out_be32(&pw->pw_regs->pwmr, rval);

	return 0;
}

/**
 * fsl_rio_port_write_init - MPC85xx port write interface init
 * @pw: Port write unit state structure
 *
 * Initializes port write unit hardware and DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */

int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
{
	int rc = 0;

	/* Following configurations require a disabled port write controller */
	out_be32(&pw->pw_regs->pwmr,
		 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);

	/* Initialize port write */
	pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
					RIO_PW_MSG_SIZE,
					&pw->port_write_msg.phys, GFP_KERNEL);
	if (!pw->port_write_msg.virt) {
		pr_err("RIO: unable to allocate port write queue\n");
		return -ENOMEM;
	}

	pw->port_write_msg.err_count = 0;
	pw->port_write_msg.discard_count = 0;

	/* Point dequeue/enqueue pointers at first entry */
	out_be32(&pw->pw_regs->epwqbar, 0);
	out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);

	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
		 in_be32(&pw->pw_regs->epwqbar),
		 in_be32(&pw->pw_regs->pwqbar));

	/* Clear interrupt status IPWSR */
	out_be32(&pw->pw_regs->pwsr,
		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));

	/* Configure the port write controller: enable snooping and all
	 * error reporting, and clear the queue full condition.
	 */
	out_be32(&pw->pw_regs->pwmr,
		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);


	/* Hook up port-write handler */
	rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
			 IRQF_SHARED, "port-write", (void *)pw);
	if (rc < 0) {
		pr_err("MPC85xx RIO: unable to request port-write irq");
		goto err_out;
	}
	/* Enable Error Interrupt */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);

	INIT_WORK(&pw->pw_work, fsl_pw_dpc);
	spin_lock_init(&pw->pw_fifo_lock);
	if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		pr_err("FIFO allocation failed\n");
		rc = -ENOMEM;
		goto err_out_irq;
	}

	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
		 in_be32(&pw->pw_regs->pwmr),
		 in_be32(&pw->pw_regs->pwsr));

	return rc;

err_out_irq:
	free_irq(IRQ_RIO_PW(pw), (void *)pw);
err_out:
	dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
			  pw->port_write_msg.virt,
			  pw->port_write_msg.phys);
	return rc;
}

/**
 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends a MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);

	/* On serial RapidIO devices such as the MPC8548 and MPC8641,
	 * the operations below are required.
	 */
	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
	out_be32(&dbell->dbell_regs->odmr, 0x00000001);

	return 0;
}
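
/*
 * Usage sketch (not part of this driver): client drivers do not call
 * fsl_rio_doorbell_send() directly; it is installed as the master
 * port's doorbell-send operation and reached through the RapidIO
 * core.  Assuming a hypothetical target device "rdev" that owns the
 * doorbell info value 0x1000:
 *
 *	err = rio_send_doorbell(rdev, 0x1000);
 *	if (err)
 *		pr_warn("doorbell to destid 0x%04x failed\n", rdev->destid);
 */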

/**
 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 omr;
	struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
					+ rmu->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
		 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
			len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
				+ len, 0, RIO_MAX_MSG_SIZE - len);

	/* Set mbox field for message, and set destid */
	desc->dport = (rdev->destid << 16) | (mbox & 0x3);

	/* Enable EOMI interrupt and priority */
	desc->dattr = 0x28000000 | ((mport->index) << 20);

	/* Set transfer size aligned to next power of 2 (in double words) */
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&rmu->msg_regs->omr);
	out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
		rmu->msg_tx_ring.tx_slot = 0;

out:
	return ret;
}

/**
 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	rmu->msg_tx_ring.dev_id = dev_id;
	rmu->msg_tx_ring.size = entries;

	for (i = 0; i < rmu->msg_tx_ring.size; i++) {
		rmu->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				&rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
		if (!rmu->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			for (j = 0; j < rmu->msg_tx_ring.size; j++)
				if (rmu->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
						RIO_MSG_BUFFER_SIZE,
						rmu->msg_tx_ring.
						virt_buffer[j],
						rmu->msg_tx_ring.
						phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
				&rmu->msg_tx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	memset(rmu->msg_tx_ring.virt, 0,
		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
	rmu->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
	out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&rmu->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&rmu->msg_regs->omr,
		 in_be32(&rmu->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);

out:
	return rc;

out_irq:
	dma_free_coherent(priv->dev,
		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
		rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

out_dma:
	for (i = 0; i < rmu->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
			rmu->msg_tx_ring.virt_buffer[i],
			rmu->msg_tx_ring.phys_buffer[i]);

	return rc;
}

/**
 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable outbound message unit */
	out_be32(&rmu->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
		rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
		rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}
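
/*
 * Usage sketch (not part of this driver): fsl_open_outb_mbox() and
 * fsl_add_outb_message() back the generic outbound mailbox API, so a
 * hypothetical client sending a message to a remote device "rdev"
 * would do something like:
 *
 *	static void my_outb_done(struct rio_mport *mport, void *dev_id,
 *				 int mbox, int slot)
 *	{
 *		pr_debug("mbox %d: descriptor slot %d completed\n",
 *			 mbox, slot);
 *	}
 *
 *	err = rio_request_outb_mbox(mport, my_dev, 0, 64, my_outb_done);
 *	if (!err)
 *		err = rio_add_outb_message(mport, rdev, 0, tx_buf, tx_len);
 *
 * The ring size (64 here) must be a power of two between
 * RIO_MIN_TX_RING_SIZE and RIO_MAX_TX_RING_SIZE, and tx_len must be
 * between 8 bytes and RIO_MAX_MSG_SIZE.
 */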

/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}

/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
		rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}
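
/*
 * Usage sketch (not part of this driver): a hypothetical client opens
 * the inbound mailbox through the RapidIO core and pre-posts receive
 * buffers before any messages can be delivered:
 *
 *	err = rio_request_inb_mbox(mport, my_dev, 0, 64, my_inb_event);
 *	if (err)
 *		return err;
 *
 *	for (i = 0; i < 64; i++) {
 *		void *buf = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
 *
 *		if (!buf)
 *			break;
 *		rio_add_inb_buffer(mport, 0, buf);
 *	}
 *
 * my_inb_event() uses the minb callback prototype
 * (mport, dev_id, mbox, slot); see the sketch after
 * fsl_get_inb_message() below for how it drains the mailbox.
 */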

/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 rmu->msg_rx_ring.rx_slot);

	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			rmu->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
		rmu->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}

/**
 * fsl_get_inb_message - Fetch an inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

	virt_buf = rmu->msg_rx_ring.virt + (phys_buf
					- rmu->msg_rx_ring.phys);
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy the maximum message size; the caller is expected to have
	 * allocated a buffer that large.
	 */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}
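
/*
 * Usage sketch (not part of this driver): inside the hypothetical
 * my_inb_event() callback from the previous sketch, a client would
 * drain the mailbox and re-post each buffer once the message has been
 * consumed:
 *
 *	static void my_inb_event(struct rio_mport *mport, void *dev_id,
 *				 int mbox, int slot)
 *	{
 *		void *msg;
 *
 *		while ((msg = rio_get_inb_message(mport, mbox)) != NULL) {
 *			handle_payload(msg);
 *			rio_add_inb_buffer(mport, mbox, msg);
 *		}
 *	}
 *
 * handle_payload() stands in for whatever protocol processing the
 * client performs on the RIO_MAX_MSG_SIZE-byte message copy.
 */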

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: Doorbell unit state structure
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
	int rc = 0;

	/* Initialize inbound doorbells */
	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
	if (!dbell->dbell_ring.virt) {
		printk(KERN_ERR
			"RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&dbell->dbell_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)dbell);
	if (rc < 0) {
		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
			dbell->dbell_ring.virt, dbell->dbell_ring.phys);
		printk(KERN_ERR
			"MPC85xx RIO: unable to request inbound doorbell irq");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
	return rc;
}

int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
	struct rio_priv *priv;
	struct fsl_rmu *rmu;
	u64 msg_start;
	const u32 *msg_addr;
	int mlen;
	int aw;

	if (!mport || !mport->priv)
		return -EINVAL;

	priv = mport->priv;

	if (!node) {
		dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n",
			priv->dev->of_node->full_name);
		return -EINVAL;
	}

	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
	if (!rmu)
		return -ENOMEM;

	aw = of_n_addr_cells(node);
	msg_addr = of_get_property(node, "reg", &mlen);
	if (!msg_addr) {
		pr_err("%s: unable to find 'reg' property of message-unit\n",
			node->full_name);
		kfree(rmu);
		return -ENOMEM;
	}
	msg_start = of_read_number(msg_addr, aw);

	rmu->msg_regs = (struct rio_msg_regs *)
			(rmu_regs_win + (u32)msg_start);

	rmu->txirq = irq_of_parse_and_map(node, 0);
	rmu->rxirq = irq_of_parse_and_map(node, 1);
	printk(KERN_INFO "%s: txirq: %d, rxirq %d\n",
		node->full_name, rmu->txirq, rmu->rxirq);

	priv->rmm_handle = rmu;

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

	return 0;
}
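
/*
 * Device-tree sketch (values are illustrative, not copied from a real
 * board file): fsl_rio_setup_rmu() expects a message-unit child node
 * of the RMU block, with a "reg" offset inside the RMU register
 * window and two interrupt specifiers in the order <tx, rx>:
 *
 *	rmu: rmu@d3000 {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		compatible = "fsl,srio-rmu";
 *		reg = <0xd3000 0x500>;
 *		ranges = <0x0 0xd3000 0x500>;
 *
 *		message-unit@0 {
 *			compatible = "fsl,srio-msg-unit";
 *			reg = <0x0 0x100>;
 *			interrupts = <60 2 0 0  61 2 0 0>;
 *		};
 *	};
 *
 * The first interrupt is mapped to txirq and the second to rxirq
 * above; the "reg" offset is added to rmu_regs_win to locate the
 * unit's registers.
 */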