// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	--------             ---------
 *	|      |             |       |
 *	|  AP  +<---.   .----+ Modem |
 *	|      +--. |   | .->+       |
 *	|      |  | |   | |  |       |
 *	--------  | |   | |  ---------
 *	          v |   v |
 *	        --+-+---+-+--
 *	        |    GSI    |
 *	        |-----------|
 *	        |           |
 *	        |    IPA    |
 *	        |           |
 *	        -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
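/* Illustrative sketch only: one plausible layout for the channel ring
 * "transfer element" (TRE) described above.  The driver's real TRE
 * definition lives elsewhere (gsi_private.h) and is authoritative; the
 * field names and widths here are assumptions for exposition.
 */
struct gsi_tre_sketch {
	__le64 addr;		/* DMA address of the data block in DRAM */
	__le16 len_opcode;	/* transfer length, or an immediate command */
	__le16 reserved;
	__le32 flags;		/* chaining and event/interrupt generation */
};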
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel (in bytes).  This determines the amount of prefetch
 *	performed by the hardware.  We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};
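/* Worked example (illustrative, based on the layout above): the GPI
 * fields overlay the raw words so that, on the little-endian IPA,
 * max_outstanding_tre occupies the top half of data word3 and
 * outstanding_threshold the top half of data word4.  With 16-byte ring
 * elements and a 16-TRE TLV FIFO (both assumptions for this example):
 *
 *	gpi.max_outstanding_tre = 16 * 16 = 256  ->  word3 = 0x01000000
 *	gpi.outstanding_threshold = 2 * 16 = 32  ->  word4 = 0x00200000
 *
 * gsi_channel_program() below writes these words to the hardware.
 */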
/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	u32 adjust;

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Reverse the offset adjustment for inter-EE register offsets */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Event ring commands are performed one at a time.  Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command.  Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}
/* Channel commands are performed one at a time.  Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command.  Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
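/* Illustrative helper (added for exposition, not used by the driver):
 * gsi_ring_index() exactly inverts gsi_ring_addr() for any index, and
 * only gsi_ring_virt() applies the modulo wrap.  For a 16-element ring,
 * index 5 maps to offset base + 5 * GSI_RING_ELEMENT_SIZE and back to
 * index 5.
 */
static inline bool gsi_ring_round_trip_ok(struct gsi_ring *ring, u32 index)
{
	return gsi_ring_index(ring, gsi_ring_addr(ring, index)) == index;
}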
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, evt_ring->state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, evt_ring->state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}
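/* Summary sketch (illustrative), distilled from the state checks in
 * the channel command functions that follow:
 *
 *	NOT_ALLOCATED --ALLOCATE--> ALLOCATED
 *	ALLOCATED/STOPPED --START--> STARTED
 *	STARTED --STOP--> STOPPED (possibly via STOP_IN_PROC)
 *	STOPPED/ERROR --RESET--> ALLOCATED
 *	ALLOCATED --DE_ALLOC--> NOT_ALLOCATED
 */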
/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}
/* Reset a GSI channel in STOPPED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
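/* Worked example (illustrative): with an 8-element event ring, after
 * the AP has processed entries 0..7 the first unfilled index is 8.
 * The doorbell value is then the address of element (8 - 1) % 8 = 7,
 * the last entry actually processed.
 */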
/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are both 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity.  Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}
/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* We enable the doorbell engine for IPA v3.5.1 */
	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch.  We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the lower 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}
static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	gsi_channel_freeze(channel);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}
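/* Usage sketch (illustrative, not taken from the driver): suspend and
 * resume are expected to be called in pairs with matching stop/start
 * requests.  E.g., a channel suspended with stop true:
 *
 *	ret = gsi_channel_suspend(gsi, channel_id, true);
 *	...
 *	ret = gsi_channel_resume(gsi, channel_id, true);
 *
 * With stop/start false, the channel is only frozen, then thawed.
 */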
/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call.  This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}
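/* Worked example (illustrative): suppose three transactions totalling
 * 4096 bytes are committed and then the doorbell rings.  At that point
 * gsi_channel_tx_queued() reports trans_count = 3 and byte_count = 4096
 * and snapshots the running totals, so the next doorbell reports only
 * what was queued after this one.  gsi_channel_tx_update() performs the
 * same delta computation on the completion side, using the counts that
 * were recorded in the last completed transaction.
 */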
/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}
/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of the two GENERIC
	 * GSI commands.  We use these to allocate and halt channels on
	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
	 * allocated, the modem "owns" these channels, and as a result we
	 * have no way of knowing the channel's state at any given time.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command.  We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 *
	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
	 * if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_CHANNEL_NOT_RUNNING:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Global interrupt type handler; dispatches error and GP_INT1 events */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ.  Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}
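/* Illustrative note: a completion event's xfer_ptr holds the 64-bit DMA
 * address of the TRE it completes.  Only the low 32 bits are needed to
 * locate the TRE within its ring, and the resulting TRE index maps
 * directly to a transaction via the channel's transaction map.
 */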
/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field tells
 * the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		/* Free using the handle just returned; ring->addr is not
		 * valid yet at this point.
		 */
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}
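/* Worked example (illustrative): with 16-byte ring elements, a
 * 16-element event ring occupies 16 * 16 = 256 bytes, and the hardware
 * requires that buffer to start on a 256-byte boundary.  gsi_ring_alloc()
 * above checks this alignment explicitly rather than assuming
 * dma_alloc_coherent() provides it.
 */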
/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}
/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
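/* Worked example (illustrative): with evt_ring_max = 20, the 32-bit
 * map starts with bits 20..31 set (ids the hardware doesn't support)
 * plus bits 10..16 set (the reserved MHI range), leaving event ids
 * 0..9 and 17..19 available to gsi_evt_ring_id_alloc().
 */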
/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!channel->gsi)
		return 0;	/* Ignore uninitialized channels */

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	netif_napi_del(&channel->napi);

	gsi_channel_deprogram(channel);
	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	bool timeout;
	u32 val;

	/* The error global interrupt type is always enabled (until we
	 * teardown), so we won't change that.  A generic EE command
	 * completes with a GSI global interrupt of type GP_INT1.  We
	 * only perform one generic command at a time (to allocate or
	 * halt a modem channel) and only from this function.  So we
	 * enable the GP_INT1 IRQ type here while we're expecting it.
	 */
	val = BIT(ERROR_INT) | BIT(GP_INT1);
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);

	/* Disable the GP_INT1 IRQ type again */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	if (!timeout)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_evt_ring_setup(gsi);
	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!channel->gsi)
			continue;	/* Ignore uninitialized channels */

		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);

	return ret;
}

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);
}

/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(dev, "GSI has not been enabled\n");
		return -EIO;
	}

	gsi_irq_setup(gsi);

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	ret = gsi_channel_setup(gsi);
	if (ret)
		gsi_irq_teardown(gsi);

	return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}
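
/* For reference, the expected call ordering (a sketch; the actual call
 * sites live in the IPA core code and are not shown here):
 *
 *	gsi_init()	probe time; hardware need not be "ready"
 *	gsi_setup()	after GSI firmware is loaded and initialized
 *	...		channels carry traffic
 *	gsi_teardown()	inverse of gsi_setup()
 *	gsi_exit()	inverse of gsi_init()
 */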

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
	/* Nothing to do */
}
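
/* A note on the TRE count requirement checked below: gsi_channel_tre_max()
 * (at the end of this file) reserves tlv_count - 1 TREs, so leaving room
 * for one maximally-sized transaction of tlv_count TREs requires
 *
 *	tre_count - (tlv_count - 1) >= tlv_count
 *
 * that is, tre_count >= 2 * tlv_count - 1. For example (hypothetical
 * values), tlv_count == 8 requires tre_count >= 15; since tre_count
 * must also be a power of 2, the practical minimum is 16.
 */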

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs). Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
			channel_id, data->channel.tre_count,
			data->channel.tlv_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}
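
/* Illustrative example (hypothetical values, not taken from any real
 * configuration data): channel data that passes all of the checks above:
 *
 *	.channel_id		= 1,
 *	.ee_id			= GSI_EE_AP,
 *	.channel.tre_count	= 256,	power of 2
 *	.channel.event_count	= 256,	power of 2
 *	.channel.tlv_count	= 8,	in 1..GSI_TLV_MAX
 *
 * Here 256 >= 2 * 8 - 1, so one maximally-sized transaction can always
 * be outstanding.
 */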

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi_evt_ring_init(gsi);

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		/* Index with data[i]; using data->channel_id here would
		 * tear down the same (first) channel on every iteration.
		 */
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}
	gsi_evt_ring_exit(gsi);

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;

	gsi_evt_ring_exit(gsi);
}
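
/* Note on the register mapping done in gsi_init() below (explanatory
 * sketch, inferred from the adjustment logic in this file): for IPA v4.5+
 * the register offset macros assume a base address GSI_EE_REG_ADJUST
 * bytes below the start of the DT "gsi" memory region, so gsi->virt is
 * moved downward by that amount after ioremap(). Every subsequent
 * gsi->virt + OFFSET access then resolves to the correct mapped address
 * for both old and new register layouts.
 */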

/* Init function for GSI. GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* The GSI layer performs NAPI on all endpoints. NAPI requires a
	 * network device structure, but the GSI layer does not have one,
	 * so we must create a dummy network device for this purpose.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt = ioremap(res->start, size);
	if (!gsi->virt) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Adjust register range pointer downward for newer IPA versions */
	gsi->virt -= adjust;

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_irq_exit;

	mutex_init(&gsi->mutex);

	return 0;

err_irq_exit:
	gsi_irq_exit(gsi);
err_iounmap:
	/* Undo the pointer adjustment; unmap what ioremap() returned */
	iounmap(gsi->virt + adjust);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	u32 adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;

	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	gsi_irq_exit(gsi);
	/* Undo the pointer adjustment made in gsi_init() before unmapping */
	iounmap(gsi->virt + adjust);
}

/* The maximum number of outstanding TREs on a channel. This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them. But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information. So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though. Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous. The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it. Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements. The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}

/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}
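
/* Worked example for the two limits above (hypothetical sizes): with
 * tre_count == 256 and tlv_count == 8, gsi_channel_tre_max() returns
 * 256 - 7 = 249. A transaction pool sized for 249 outstanding TREs
 * with up to 8 TREs per transaction adds 7 spare entries (matching
 * gsi_trans_pool_init()), for a total of exactly 256, so the pool
 * stays within the power-of-2 boundary described above.
 */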