// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 *    --------             ---------
 *    |      |             |       |
 *    |  AP  +<---.   .----+ Modem |
 *    |      +--. |   | .->+       |
 *    |      |  | |   | |  |       |
 *    --------  | |   | |  ---------
 *              v |   v |
 *            --+-+---+-+--
 *            |    GSI    |
 *            |-----------|
 *            |           |
 *            |    IPA    |
 *            |           |
 *            -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM. After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
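/* Illustrative only: a hypothetical sequence tying the pieces above
 * together. The AP writes TREs 5, 6 and 7 to a channel ring and rings
 * the channel doorbell once. Later the hardware adds a single event
 * pointing at TRE 7 to the channel's event ring and interrupts the AP.
 * Because ring elements complete strictly in order, that one event
 * tells the AP that TREs 5 and 6 have completed as well.
 */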
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10
#define GSI_CHANNEL_MODEM_FLOW_RETRIES	5	/* disable flow control only */

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel (in bytes). This determines the amount of prefetch
 *	performed by the hardware. We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell. We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};
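/* A worked example of the GPI scratch values (the sizes here are
 * hypothetical; the actual values are computed in gsi_channel_program()
 * below). Ring elements are 16 bytes (GSI_RING_ELEMENT_SIZE, matched to
 * sizeof(struct gsi_event) in gsi_validate_build() below), so for a
 * channel whose trans_tre_max is 8:
 *
 *	max_outstanding_tre   = 8 * 16 = 128 bytes
 *	outstanding_threshold = 2 * 16 =  32 bytes
 */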
/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed). Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size. We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
	return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Event ring commands are performed one at a time. Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command. Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time. Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command. Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}
/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports. Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset). A breakpoint interrupt
	 * also exists, but we don't support that. We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
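/* A worked example of the ring index arithmetic above, using a
 * hypothetical ring with count = 128 elements of 16 bytes each:
 *
 *	gsi_ring_virt(ring, 130) == ring->virt + (130 % 128) * 16
 *	gsi_ring_addr(ring, 2)   == lower_32_bits(ring->addr) + 2 * 16
 *	gsi_ring_index(ring, gsi_ring_addr(ring, 2)) == 2
 *
 * Note that gsi_ring_addr() does *not* reduce the index itself;
 * callers must apply the modulus first (see gsi_evt_ring_doorbell()
 * and gsi_channel_doorbell() below).
 */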
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
	struct completion *completion = &gsi->completion;

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	/* Get initial event ring state */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, state);

	return -EIO;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void __iomem *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}
/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out. If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}
/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
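/* Worked example of the doorbell arithmetic above: because index is a
 * u32 and the ring count is a power of 2, "(index - 1) % ring->count"
 * is well defined even when index is 0. With a hypothetical count of
 * 16 and index 0:
 *
 *	(0 - 1) % 16 == 0xffffffff % 16 == 15
 *
 * so the doorbell points at the last element of the ring, which is the
 * entry preceding the first unfilled one (entry 0). This is what makes
 * the initial gsi_evt_ring_doorbell(gsi, evt_ring_id, 0) call in
 * gsi_evt_ring_program() below work.
 */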
/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = ev_r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = lower_32_bits(evt_ring->ring.addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
	val = upper_32_bits(evt_ring->ring.addr);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and MSI address high and low address is 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	const struct list_head *list;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	/* There is a small chance a TX transaction got allocated just
	 * before we disabled transmits, so check for that.
	 */
	if (channel->toward_ipa) {
		list = &trans_info->alloc;
		if (!list_empty(list))
			goto done;
		list = &trans_info->pending;
		if (!list_empty(list))
			goto done;
	}

	/* Otherwise (TX or RX) we want to wait for anything that
	 * has completed, or has been polled but not released yet.
	 */
	list = &trans_info->complete;
	if (!list_empty(list))
		goto done;
	list = &trans_info->polled;
	if (list_empty(list))
		list = NULL;
done:
	trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}
/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = r_length_encoded(gsi->version, size);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = lower_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
	val = upper_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* No need to use the doorbell engine starting at IPA v4.0 */
	if (gsi->version < IPA_VERSION_4_0 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch. We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}
	/* All channels set DB_IN_BYTES */
	if (gsi->version >= IPA_VERSION_4_9)
		val |= DB_IN_BYTES;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = channel->trans_tre_max *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the upper 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}
static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (resume && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, false);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}
static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (suspend && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, false);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a started channel for suspend */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting if stopped) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, true);
}

/* Prevent all GSI interrupts while suspended */
void gsi_suspend(struct gsi *gsi)
{
	disable_irq(gsi->irq);
}

/* Allow all GSI interrupts again when resuming */
void gsi_resume(struct gsi *gsi)
{
	enable_irq(gsi->irq);
}

void gsi_trans_tx_committed(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];

	channel->trans_count++;
	channel->byte_count += trans->len;

	trans->trans_count = channel->trans_count;
	trans->byte_count = channel->byte_count;
}

void gsi_trans_tx_queued(struct gsi_trans *trans)
{
	u32 channel_id = trans->channel_id;
	struct gsi *gsi = trans->gsi;
	struct gsi_channel *channel;
	u32 trans_count;
	u32 byte_count;

	channel = &gsi->channel[channel_id];

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}
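/* A worked example of the counter deltas above (hypothetical values).
 * Suppose three 100-byte transactions have been committed since the
 * last doorbell, so channel->byte_count is 300 ahead of
 * channel->queued_byte_count. When gsi_trans_tx_queued() runs:
 *
 *	byte_count = 300;  trans_count = 3;
 *
 * are reported to ipa_gsi_channel_tx_queued() and the queued_* counters
 * catch up. gsi_trans_tx_completed() below applies the same scheme to
 * the compl_* counters, using the counts snapshotted in the transaction
 * by gsi_trans_tx_committed().
 */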
/**
 * gsi_trans_tx_completed() - Report completed TX transactions
 * @trans:	TX channel transaction that has completed
 *
 * Report that a transaction on a TX channel has completed. At the time a
 * transaction is committed, we record *in the transaction* its channel's
 * committed transaction and byte counts. Transactions are completed in
 * order, and the difference between the channel's byte/transaction count
 * when the transaction was committed and when it completes tells us
 * exactly how much data has been transferred while the transaction was
 * pending.
 *
 * We report this information to the network stack, which uses it to manage
 * the rate at which data is sent to hardware.
 */
static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
	u32 channel_id = trans->channel_id;
	struct gsi *gsi = trans->gsi;
	struct gsi_channel *channel;
	u32 trans_count;
	u32 byte_count;

	channel = &gsi->channel[channel_id];
	trans_count = trans->trans_count - channel->compl_trans_count;
	byte_count = trans->byte_count - channel->compl_byte_count;

	channel->compl_trans_count += trans_count;
	channel->compl_byte_count += byte_count;

	ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);

		channel_mask ^= BIT(channel_id);

		complete(&gsi->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		complete(&gsi->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&gsi->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of GENERIC GSI
	 * commands. We use these to allocate and halt channels on the
	 * modem's behalf due to a hardware quirk on IPA v4.2. The modem
	 * "owns" channels even when the AP allocates them, and we have no
	 * way of knowing whether a modem channel's state has been changed.
	 *
	 * We also use GENERIC commands to enable/disable channel flow
	 * control for IPA v4.2+.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command. We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 * Similarly, we could get an error back when updating flow control
	 * on a channel because it's not in the proper state.
	 *
	 * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
	 * error if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_INCORRECT_CHANNEL_STATE:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	gsi_irq_ieob_disable(gsi, event_mask);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0)
		return ret ? : -EINVAL;

	gsi->irq = ret;

	return 0;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *
gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
	u32 channel_id = event->chid;
	struct gsi_channel *channel;
	struct gsi_trans *trans;
	u32 tre_offset;
	u32 tre_index;

	channel = &gsi->channel[channel_id];
	if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
		return NULL;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	trans = gsi_channel_trans_mapped(channel, tre_index);

	if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
		return NULL;

	return trans;
}
/**
 * gsi_evt_ring_update() - Update transaction state from hardware
 * @gsi:		GSI pointer
 * @evt_ring_id:	Event ring ID
 * @index:		Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer. Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * When an event for a TX channel arrives we use information in the
 * transaction to report the number of requests and bytes that have been
 * transferred.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked. The ring's index field tells us
 * the first entry in need of processing. The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_event *event_done;
	struct gsi_event *event;
	u32 event_avail;
	u32 old_index;

	/* Starting with the oldest un-processed event, determine which
	 * transaction (and which channel) is associated with the event.
	 * For RX channels, update each completed transaction with the
	 * number of bytes that were actually received. For TX channels
	 * associated with a network device, report to the network stack
	 * the number of transfers and bytes this completion represents.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		struct gsi_trans *trans;

		trans = gsi_event_trans(gsi, event);
		if (!trans)
			return;

		if (trans->direction == DMA_FROM_DEVICE)
			trans->len = __le16_to_cpu(event->len);
		else
			gsi_trans_tx_completed(trans);

		gsi_trans_move_complete(trans);

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
	} while (event != event_done);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
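/* Worked example of the wrap handling above (hypothetical ring state):
 * with ring->count = 8, old_index = 6 and a hardware-reported index of
 * 2, event_avail = 8 - (6 % 8) = 2. Events 6 and 7 are processed, the
 * walk wraps to slot 0, events 0 and 1 are processed, and the loop
 * stops when it reaches gsi_ring_virt(ring, 2), the first unfilled
 * event.
 */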
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	u32 size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size.
	 * The DMA address returned by dma_alloc_coherent() is guaranteed to
	 * be a power-of-2 number of pages, which satisfies the requirement.
	 */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;

	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done. Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return NULL;

	/* Get the transaction for the latest completed event. */
	trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
	if (!trans)
		return NULL;

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received. For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	gsi_evt_ring_update(gsi, evt_ring_id, index);

	return gsi_channel_trans_complete(channel);
}
/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list. If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed. If so, they're moved to the
 * completed list and the new first entry is returned. If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans)	/* List is empty; see if there's more to do */
		trans = gsi_channel_update(channel);

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used. This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
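/* Example with a hypothetical hardware limit of evt_ring_max = 20: the
 * 32-bit view of the map has bits 20-31 set by the GENMASK() above
 * (event ids the hardware doesn't implement) and bits 10-16 set for the
 * reserved MHI ids, leaving ids 0-9 and 17-19 allocatable:
 *
 *	0xfff00000 | 0x0001fc00 == 0xfff1fc00
 */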
1584 */ 1585 static u32 gsi_event_bitmap_init(u32 evt_ring_max) 1586 { 1587 u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max); 1588 1589 event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START); 1590 1591 return event_bitmap; 1592 } 1593 1594 /* Setup function for a single channel */ 1595 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) 1596 { 1597 struct gsi_channel *channel = &gsi->channel[channel_id]; 1598 u32 evt_ring_id = channel->evt_ring_id; 1599 int ret; 1600 1601 if (!gsi_channel_initialized(channel)) 1602 return 0; 1603 1604 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id); 1605 if (ret) 1606 return ret; 1607 1608 gsi_evt_ring_program(gsi, evt_ring_id); 1609 1610 ret = gsi_channel_alloc_command(gsi, channel_id); 1611 if (ret) 1612 goto err_evt_ring_de_alloc; 1613 1614 gsi_channel_program(channel, true); 1615 1616 if (channel->toward_ipa) 1617 netif_napi_add_tx(&gsi->dummy_dev, &channel->napi, 1618 gsi_channel_poll); 1619 else 1620 netif_napi_add(&gsi->dummy_dev, &channel->napi, 1621 gsi_channel_poll, NAPI_POLL_WEIGHT); 1622 1623 return 0; 1624 1625 err_evt_ring_de_alloc: 1626 /* We've done nothing with the event ring yet so don't reset */ 1627 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); 1628 1629 return ret; 1630 } 1631 1632 /* Inverse of gsi_channel_setup_one() */ 1633 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) 1634 { 1635 struct gsi_channel *channel = &gsi->channel[channel_id]; 1636 u32 evt_ring_id = channel->evt_ring_id; 1637 1638 if (!gsi_channel_initialized(channel)) 1639 return; 1640 1641 netif_napi_del(&channel->napi); 1642 1643 gsi_channel_de_alloc_command(gsi, channel_id); 1644 gsi_evt_ring_reset_command(gsi, evt_ring_id); 1645 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); 1646 } 1647 1648 /* We use generic commands only to operate on modem channels. We don't have 1649 * the ability to determine channel state for a modem channel, so we simply 1650 * issue the command and wait for it to complete. 1651 */ 1652 static int gsi_generic_command(struct gsi *gsi, u32 channel_id, 1653 enum gsi_generic_cmd_opcode opcode, 1654 u8 params) 1655 { 1656 bool timeout; 1657 u32 val; 1658 1659 /* The error global interrupt type is always enabled (until we tear 1660 * down), so we will keep it enabled. 1661 * 1662 * A generic EE command completes with a GSI global interrupt of 1663 * type GP_INT1. We only perform one generic command at a time 1664 * (to allocate, halt, or enable/disable flow control on a modem 1665 * channel), and only from this function. So we enable the GP_INT1 1666 * IRQ type here, and disable it again after the command completes. 
1667 */ 1668 val = BIT(ERROR_INT) | BIT(GP_INT1); 1669 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 1670 1671 /* First zero the result code field */ 1672 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1673 val &= ~GENERIC_EE_RESULT_FMASK; 1674 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1675 1676 /* Now issue the command */ 1677 val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK); 1678 val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); 1679 val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); 1680 val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK); 1681 1682 timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val); 1683 1684 /* Disable the GP_INT1 IRQ type again */ 1685 iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 1686 1687 if (!timeout) 1688 return gsi->result; 1689 1690 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", 1691 opcode, channel_id); 1692 1693 return -ETIMEDOUT; 1694 } 1695 1696 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) 1697 { 1698 return gsi_generic_command(gsi, channel_id, 1699 GSI_GENERIC_ALLOCATE_CHANNEL, 0); 1700 } 1701 1702 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) 1703 { 1704 u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES; 1705 int ret; 1706 1707 do 1708 ret = gsi_generic_command(gsi, channel_id, 1709 GSI_GENERIC_HALT_CHANNEL, 0); 1710 while (ret == -EAGAIN && retries--); 1711 1712 if (ret) 1713 dev_err(gsi->dev, "error %d halting modem channel %u\n", 1714 ret, channel_id); 1715 } 1716 1717 /* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */ 1718 void 1719 gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable) 1720 { 1721 u32 retries = 0; 1722 u32 command; 1723 int ret; 1724 1725 command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL 1726 : GSI_GENERIC_DISABLE_FLOW_CONTROL; 1727 /* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable 1728 * is underway. In this case we need to retry the command. 1729 */ 1730 if (!enable && gsi->version >= IPA_VERSION_4_11) 1731 retries = GSI_CHANNEL_MODEM_FLOW_RETRIES; 1732 1733 do 1734 ret = gsi_generic_command(gsi, channel_id, command, 0); 1735 while (ret == -EAGAIN && retries--); 1736 1737 if (ret) 1738 dev_err(gsi->dev, 1739 "error %d %sabling mode channel %u flow control\n", 1740 ret, enable ? 
"en" : "dis", channel_id); 1741 } 1742 1743 /* Setup function for channels */ 1744 static int gsi_channel_setup(struct gsi *gsi) 1745 { 1746 u32 channel_id = 0; 1747 u32 mask; 1748 int ret; 1749 1750 gsi_irq_enable(gsi); 1751 1752 mutex_lock(&gsi->mutex); 1753 1754 do { 1755 ret = gsi_channel_setup_one(gsi, channel_id); 1756 if (ret) 1757 goto err_unwind; 1758 } while (++channel_id < gsi->channel_count); 1759 1760 /* Make sure no channels were defined that hardware does not support */ 1761 while (channel_id < GSI_CHANNEL_COUNT_MAX) { 1762 struct gsi_channel *channel = &gsi->channel[channel_id++]; 1763 1764 if (!gsi_channel_initialized(channel)) 1765 continue; 1766 1767 ret = -EINVAL; 1768 dev_err(gsi->dev, "channel %u not supported by hardware\n", 1769 channel_id - 1); 1770 channel_id = gsi->channel_count; 1771 goto err_unwind; 1772 } 1773 1774 /* Allocate modem channels if necessary */ 1775 mask = gsi->modem_channel_bitmap; 1776 while (mask) { 1777 u32 modem_channel_id = __ffs(mask); 1778 1779 ret = gsi_modem_channel_alloc(gsi, modem_channel_id); 1780 if (ret) 1781 goto err_unwind_modem; 1782 1783 /* Clear bit from mask only after success (for unwind) */ 1784 mask ^= BIT(modem_channel_id); 1785 } 1786 1787 mutex_unlock(&gsi->mutex); 1788 1789 return 0; 1790 1791 err_unwind_modem: 1792 /* Compute which modem channels need to be deallocated */ 1793 mask ^= gsi->modem_channel_bitmap; 1794 while (mask) { 1795 channel_id = __fls(mask); 1796 1797 mask ^= BIT(channel_id); 1798 1799 gsi_modem_channel_halt(gsi, channel_id); 1800 } 1801 1802 err_unwind: 1803 while (channel_id--) 1804 gsi_channel_teardown_one(gsi, channel_id); 1805 1806 mutex_unlock(&gsi->mutex); 1807 1808 gsi_irq_disable(gsi); 1809 1810 return ret; 1811 } 1812 1813 /* Inverse of gsi_channel_setup() */ 1814 static void gsi_channel_teardown(struct gsi *gsi) 1815 { 1816 u32 mask = gsi->modem_channel_bitmap; 1817 u32 channel_id; 1818 1819 mutex_lock(&gsi->mutex); 1820 1821 while (mask) { 1822 channel_id = __fls(mask); 1823 1824 mask ^= BIT(channel_id); 1825 1826 gsi_modem_channel_halt(gsi, channel_id); 1827 } 1828 1829 channel_id = gsi->channel_count - 1; 1830 do 1831 gsi_channel_teardown_one(gsi, channel_id); 1832 while (channel_id--); 1833 1834 mutex_unlock(&gsi->mutex); 1835 1836 gsi_irq_disable(gsi); 1837 } 1838 1839 /* Turn off all GSI interrupts initially */ 1840 static int gsi_irq_setup(struct gsi *gsi) 1841 { 1842 int ret; 1843 1844 /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ 1845 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); 1846 1847 /* Disable all interrupt types */ 1848 gsi_irq_type_update(gsi, 0); 1849 1850 /* Clear all type-specific interrupt masks */ 1851 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 1852 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 1853 iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 1854 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 1855 1856 /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */ 1857 if (gsi->version > IPA_VERSION_3_1) { 1858 u32 offset; 1859 1860 /* These registers are in the non-adjusted address range */ 1861 offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET; 1862 iowrite32(0, gsi->virt_raw + offset); 1863 offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET; 1864 iowrite32(0, gsi->virt_raw + offset); 1865 } 1866 1867 iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 1868 1869 ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi); 1870 if (ret) 1871 dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret); 
/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
}

/* Turn off all GSI interrupts initially */
static int gsi_irq_setup(struct gsi *gsi)
{
	int ret;

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
	if (gsi->version > IPA_VERSION_3_1) {
		u32 offset;

		/* These registers are in the non-adjusted address range */
		offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
		iowrite32(0, gsi->virt_raw + offset);
		offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
		iowrite32(0, gsi->virt_raw + offset);
	}

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);

	ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
	if (ret)
		dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);

	return ret;
}

static void gsi_irq_teardown(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
static int gsi_ring_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 count;
	u32 val;

	if (gsi->version < IPA_VERSION_3_5_1) {
		/* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;

		return 0;
	}

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, count);
		count = GSI_CHANNEL_COUNT_MAX;
	}
	gsi->channel_count = count;

	count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, count);
		count = GSI_EVT_RING_COUNT_MAX;
	}
	gsi->evt_ring_count = count;

	return 0;
}
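/* Example (hypothetical values): if HW_PARAM_2 reported 28 channels and 27
 * event rings per EE while the driver was built with GSI_CHANNEL_COUNT_MAX
 * of 23 and GSI_EVT_RING_COUNT_MAX of 24, gsi_ring_setup() above would warn
 * and record channel_count = 23 and evt_ring_count = 24; the driver never
 * uses more rings than it was built to track.
 */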
/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(gsi->dev, "GSI has not been enabled\n");
		return -EIO;
	}

	ret = gsi_irq_setup(gsi);
	if (ret)
		return ret;

	ret = gsi_ring_setup(gsi);	/* No matching teardown required */
	if (ret)
		goto err_irq_teardown;

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	ret = gsi_channel_setup(gsi);
	if (ret)
		goto err_irq_teardown;

	return 0;

err_irq_teardown:
	gsi_irq_teardown(gsi);

	return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}
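/* Note: gsi_ring_alloc() is called in gsi_channel_evt_ring_init() above
 * with channel->event_count as the ring size.  The hardware requires a
 * power-of-2 ring size, which is enforced for event_count (and tre_count)
 * by gsi_channel_data_valid() below, so no further size check is needed
 * at allocation time.
 */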
2052 */ 2053 if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) { 2054 dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n", 2055 channel_id, channel_data->tlv_count, 2056 channel_data->tre_count); 2057 return false; 2058 } 2059 2060 if (!is_power_of_2(channel_data->tre_count)) { 2061 dev_err(dev, "channel %u bad tre_count %u; not power of 2\n", 2062 channel_id, channel_data->tre_count); 2063 return false; 2064 } 2065 2066 if (!is_power_of_2(channel_data->event_count)) { 2067 dev_err(dev, "channel %u bad event_count %u; not power of 2\n", 2068 channel_id, channel_data->event_count); 2069 return false; 2070 } 2071 2072 return true; 2073 } 2074 2075 /* Init function for a single channel */ 2076 static int gsi_channel_init_one(struct gsi *gsi, 2077 const struct ipa_gsi_endpoint_data *data, 2078 bool command) 2079 { 2080 struct gsi_channel *channel; 2081 u32 tre_count; 2082 int ret; 2083 2084 if (!gsi_channel_data_valid(gsi, command, data)) 2085 return -EINVAL; 2086 2087 /* Worst case we need an event for every outstanding TRE */ 2088 if (data->channel.tre_count > data->channel.event_count) { 2089 tre_count = data->channel.event_count; 2090 dev_warn(gsi->dev, "channel %u limited to %u TREs\n", 2091 data->channel_id, tre_count); 2092 } else { 2093 tre_count = data->channel.tre_count; 2094 } 2095 2096 channel = &gsi->channel[data->channel_id]; 2097 memset(channel, 0, sizeof(*channel)); 2098 2099 channel->gsi = gsi; 2100 channel->toward_ipa = data->toward_ipa; 2101 channel->command = command; 2102 channel->trans_tre_max = data->channel.tlv_count; 2103 channel->tre_count = tre_count; 2104 channel->event_count = data->channel.event_count; 2105 2106 ret = gsi_channel_evt_ring_init(channel); 2107 if (ret) 2108 goto err_clear_gsi; 2109 2110 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); 2111 if (ret) { 2112 dev_err(gsi->dev, "error %d allocating channel %u ring\n", 2113 ret, data->channel_id); 2114 goto err_channel_evt_ring_exit; 2115 } 2116 2117 ret = gsi_channel_trans_init(gsi, data->channel_id); 2118 if (ret) 2119 goto err_ring_free; 2120 2121 if (command) { 2122 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id); 2123 2124 ret = ipa_cmd_pool_init(channel, tre_max); 2125 } 2126 if (!ret) 2127 return 0; /* Success! 
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, command, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->trans_tre_max = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!gsi_channel_initialized(channel))
		return;

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;
}
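/* Example of the v4.2 workaround above (hypothetical endpoint data): if the
 * endpoint array defines modem channels 1 and 2, gsi_channel_init() records
 * modem_channel_bitmap = BIT(1) | BIT(2) = 0x6 rather than initializing
 * those channels locally; gsi_channel_setup() later issues a generic
 * ALLOCATE_CHANNEL command for each set bit on the modem's behalf.
 */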
/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* GSI uses NAPI on all channels.  Create a dummy network device
	 * for the channel NAPI contexts to be associated with.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt_raw = ioremap(res->start, size);
	if (!gsi->virt_raw) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Most registers are accessed using an adjusted register range */
	gsi->virt = gsi->virt_raw - adjust;

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);	/* No matching exit required */
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_iounmap;

	mutex_init(&gsi->mutex);

	return 0;

err_iounmap:
	iounmap(gsi->virt_raw);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	iounmap(gsi->virt_raw);
}

/* The maximum number of outstanding TREs on a channel.  This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information.  So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though.  Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous.  The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements.  The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->trans_tre_max - 1);
}
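/* Worked example (hypothetical sizes): for a channel with tre_count == 256
 * and trans_tre_max (the TLV count) == 16, the value returned above is
 * 256 - 15 = 241.  A pool sized for 241 transactions plus the 15 extra
 * entries added in gsi_trans_pool_init() needs exactly 256 slots, staying
 * within the power-of-2 boundary; using the hardware limit of 255 would
 * instead push the pool to 270 entries, crossing the boundary described
 * in the comment above.
 */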