// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	    --------	         ---------
 *	    |      |	         |       |
 *	    |  AP  +<---.   .----+ Modem |
 *	    |      +--. |   | .->+       |
 *	    |      |  | |   | |  |       |
 *	    --------  | |   | |  ---------
 *	              v |   v |
 *	            --+-+---+-+--
 *	            |    GSI    |
 *	            |-----------|
 *	            |           |
 *	            |    IPA    |
 *	            |           |
 *	            -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
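/* To make the flow above concrete: the AP-side life of one TX transfer,
 * expressed as a sketch in terms of the transaction machinery declared
 * in gsi_trans.h (pseudo-code, not a verbatim call sequence):
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
 *	gsi_trans_page_add(trans, page, size, 0);  // fill one TRE
 *	gsi_trans_commit(trans, true);		   // write TRE, ring doorbell
 *	// ... hardware moves the data, appends a completion event to the
 *	// channel's event ring, and raises an IEOB interrupt; NAPI polling
 *	// then retires the transaction via gsi_trans_complete().
 */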
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/** struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of outstanding TREs allowed in a single
 *	transaction on a channel (expressed in bytes).  This determines the
 *	amount of prefetch performed by the hardware.  We configure this to
 *	equal the size of the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};
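/* For illustration: on a little-endian CPU, the GPI fields above land in
 * the overlaid data words as follows (a sketch, assuming a hypothetical
 * channel whose TLV FIFO holds 8 TREs of GSI_RING_ELEMENT_SIZE == 16):
 *
 *	union gsi_channel_scratch scr = { };
 *
 *	scr.gpi.max_outstanding_tre = 8 * GSI_RING_ELEMENT_SIZE;   // 128
 *	scr.gpi.outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE; //  32
 *	// Now scr.data.word3 == 128 << 16 and scr.data.word4 == 32 << 16,
 *	// which is what gsi_channel_program() below writes to the hardware.
 */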
/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	u32 adjust;

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Reverse the offset adjustment for inter-EE register offsets */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}
/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
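/* Worked example for the helpers above (a sketch, assuming a hypothetical
 * ring of count = 8 elements, GSI_RING_ELEMENT_SIZE == 16, mapped at DMA
 * address 0x80001000):
 *
 *	gsi_ring_addr(ring, 3)			// 0x80001000 + 3 * 16
 *						//	== 0x80001030
 *	gsi_ring_index(ring, 0x80001030)	// 0x30 / 16 == 3
 *	gsi_ring_virt(ring, 11)			// ring->virt + 0x30, since
 *						//	11 % 8 == 3
 */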
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one event ring command at a time, and event
	 * control interrupts should only occur when such a command
	 * is issued here.  Only permit *this* event ring to trigger
	 * an interrupt, and only enable the event control IRQ type
	 * when we expect it to occur.
	 */
	val = BIT(evt_ring_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, evt_ring->state);
		return -EINVAL;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
			evt_ring_id, evt_ring->state);
		ret = -EIO;
	}

	return ret;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
		dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
			evt_ring_id, evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
			evt_ring_id, evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one channel command at a time, and channel
	 * control interrupts should only occur when such a command is
	 * issued here.  So we only permit *this* channel to trigger
	 * an interrupt and only enable the channel control IRQ type
	 * when we expect it to occur.
	 */
	val = BIT(channel_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}
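/* The command/completion interplay above, spelled out: callers hold
 * gsi->mutex, so only one command is in flight at a time.
 * gsi_channel_command() unmasks CH_CTRL interrupts for just this channel,
 * writes GSI_CH_CMD, and sleeps in gsi_command().  When the command
 * finishes, gsi_isr() -> gsi_isr_chan_ctrl() (below) calls
 * complete(&channel->completion), waking the caller, which then re-reads
 * the channel state to verify the command had the intended effect.
 */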
/* Allocate a GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u after alloc\n",
			channel_id, state);
		ret = -EIO;
	}

	return ret;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
		dev_err(dev, "channel %u bad state %u after start\n",
			gsi_channel_id(channel), state);
		ret = -EIO;
	}

	return ret;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
		return ret;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}
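/* For reference, the channel state machine the commands above walk through
 * (a sketch; states per enum gsi_channel_state, with STOP possibly passing
 * through STOP_IN_PROC on its way to STOPPED):
 *
 *	NOT_ALLOCATED --ALLOCATE--> ALLOCATED --START--> STARTED
 *	STARTED --STOP--> [STOP_IN_PROC -->] STOPPED
 *	STOPPED or ERROR --RESET--> ALLOCATED --DE_ALLOC--> NOT_ALLOCATED
 */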
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
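/* Worked example: for a 16-entry ring, gsi_evt_ring_doorbell(gsi, id, 0)
 * (as issued by gsi_evt_ring_program() below) computes (0 - 1) % 16 in
 * unsigned 32-bit arithmetic, i.e. 0xffffffff % 16 == 15, so the doorbell
 * is written with the address of the ring's last element--the one that
 * logically precedes index 0.
 */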
/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity.  Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}
/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* We enable the doorbell engine for IPA v3.5.1 */
	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch.  We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the low-order 16 bits of the last scratch
	 * register; only the high-order 16 bits (from word4) are ours to
	 * write.  The read-modify-write below assumes the preserved bits
	 * remain unchanged between the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice */
	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}
/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since the last call.  This and the next
 * function supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}
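/* Worked example (hypothetical numbers): if channel->byte_count is 5000
 * when the doorbell rings and channel->queued_byte_count was 3800 after
 * the previous doorbell, gsi_channel_tx_queued() reports 1200 newly
 * queued bytes to the network stack and records 5000 as the baseline
 * for the next call.
 */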
/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}
/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of the two GENERIC
	 * GSI commands.  We use these to allocate and halt channels on
	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
	 * allocated, the modem "owns" these channels, and as a result we
	 * have no way of knowing the channel's state at any given time.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command.  We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 *
	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
	 * if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_CHANNEL_NOT_RUNNING:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Global (GLOB_EE) interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion (IEOB) interrupt handler */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ.  Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}
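/* Worked example (a sketch, with a hypothetical TRE ring at DMA address
 * 0x90002000): an event whose xfer_ptr is 0x90002040 yields low-order
 * offset 0x40, so tre_index = 0x40 / GSI_RING_ELEMENT_SIZE = 4, and
 * gsi_channel_trans_mapped() returns whatever transaction was recorded
 * against TRE 4 when it was committed.
 */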
/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field tells
 * the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		/* Free using addr; ring->addr has not been assigned yet */
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}
/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}
/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
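/* Worked example: with a hypothetical evt_ring_max of 20, the bitmap ends
 * up with bits 31..20 set (event ids the hardware doesn't have) and bits
 * 16..10 set (the reserved MHI ids), leaving ids 0-9 and 17-19 for ffz()
 * in gsi_evt_ring_id_alloc() to hand out.
 */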
/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!channel->gsi)
		return 0;	/* Ignore uninitialized channels */

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	netif_napi_del(&channel->napi);

	gsi_channel_deprogram(channel);
	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	bool success;
	u32 val;

	/* The error global interrupt type is always enabled (until we
	 * teardown), so we won't change that.  A generic EE command
	 * completes with a GSI global interrupt of type GP_INT1.  We
	 * only perform one generic command at a time (to allocate or
	 * halt a modem channel) and only from this function.  So we
	 * enable the GP_INT1 IRQ type here while we're expecting it.
	 */
	val = BIT(ERROR_INT) | BIT(GP_INT1);
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);

	/* Disable the GP_INT1 IRQ type again */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	if (success)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_evt_ring_setup(gsi);
	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!channel->gsi)
			continue;	/* Ignore uninitialized channels */

		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);

	return ret;
}
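/* Unwind example (hypothetical bitmap): if modem_channel_bitmap is 0x30
 * (channels 4 and 5) and allocating channel 5 fails, mask is 0x20 at
 * err_unwind_modem; mask ^= bitmap then yields 0x10, so only channel 4,
 * the one that was successfully allocated, gets halted.
 */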

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);
}

/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(dev, "GSI has not been enabled\n");
		return -EIO;
	}

	gsi_irq_setup(gsi);

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	ret = gsi_channel_setup(gsi);
	if (ret)
		gsi_irq_teardown(gsi);

	return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}
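
/* Sketch of the expected call sequence (callers live elsewhere in the
 * IPA driver; shown here only to illustrate the init/setup pairing):
 *
 *	gsi_init(gsi, pdev, ...);	- at probe; hardware need not be ready
 *	gsi_setup(gsi);			- once GSI firmware is loaded/enabled
 *	...channels carry transactions...
 *	gsi_teardown(gsi);
 *	gsi_exit(gsi);			- at remove
 */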

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
	/* Nothing to do */
}

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range the driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs).  Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u tre_count %u too small; must be at least %u\n",
			channel_id, data->channel.tre_count,
			2 * data->channel.tlv_count - 1);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}
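
/* Worked example (hypothetical configuration): with tlv_count = 16, the
 * check above requires tre_count >= 2 * 16 - 1 = 31; since tre_count must
 * also be a power of 2, the smallest valid value is 32.  That leaves
 * gsi_channel_tre_max() = 32 - (16 - 1) = 17 TREs usable, just enough for
 * one maximally-sized (16-TRE) transaction to be outstanding.
 */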

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}
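
/* Illustration (hypothetical values): if a channel were configured with
 * tre_count = 512 but event_count = 256, the clamp above would limit the
 * channel to 256 TREs, since each outstanding TRE may need an event ring
 * slot to report its completion.
 */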

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi_evt_ring_init(gsi);

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}
	gsi_evt_ring_exit(gsi);

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;

	gsi_evt_ring_exit(gsi);
}
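
/* Illustration (hypothetical endpoint data): on IPA v4.2, if the data
 * array lists modem channels 0 and 5, gsi_channel_init() accumulates
 * modem_channel_bitmap = BIT(0) | BIT(5) = 0x21, and gsi_channel_setup()
 * later issues a GSI_GENERIC_ALLOCATE_CHANNEL command for each set bit.
 */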

/* Init function for GSI.  GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t size;
	u32 adjust;
	int ret;

	gsi_validate_build();

	gsi->dev = dev;
	gsi->version = version;

	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
	 * network device structure, but the GSI layer does not have one,
	 * so we must create a dummy network device for this purpose.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(dev, "DT error getting \"gsi\" memory property\n");
		return -ENODEV;
	}

	size = resource_size(res);
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
		return -EINVAL;
	}

	/* Make sure we can make our pointer adjustment if necessary */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	if (res->start < adjust) {
		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
			adjust);
		return -EINVAL;
	}

	gsi->virt = ioremap(res->start, size);
	if (!gsi->virt) {
		dev_err(dev, "unable to remap \"gsi\" memory\n");
		return -ENOMEM;
	}
	/* Adjust register range pointer downward for newer IPA versions */
	gsi->virt -= adjust;

	init_completion(&gsi->completion);

	ret = gsi_irq_init(gsi, pdev);
	if (ret)
		goto err_iounmap;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_irq_exit;

	mutex_init(&gsi->mutex);

	return 0;

err_irq_exit:
	gsi_irq_exit(gsi);
err_iounmap:
	iounmap(gsi->virt);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	gsi_irq_exit(gsi);
	iounmap(gsi->virt);
}

/* The maximum number of outstanding TREs on a channel.  This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information.  So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though.  Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous.  The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements.  The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}

/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}
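
/* Worked example (hypothetical numbers): for a channel with
 * tre_count = 256 and tlv_count = 16, gsi_channel_tre_max() returns
 * 256 - (16 - 1) = 241 outstanding TREs (below the 255-TRE hardware
 * limit), and gsi_channel_trans_tre_max() allows at most 16 TREs in
 * any single transaction.
 */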