1 // SPDX-License-Identifier: GPL-2.0 2 3 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 4 * Copyright (C) 2018-2020 Linaro Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <linux/bits.h> 9 #include <linux/bitfield.h> 10 #include <linux/mutex.h> 11 #include <linux/completion.h> 12 #include <linux/io.h> 13 #include <linux/bug.h> 14 #include <linux/interrupt.h> 15 #include <linux/platform_device.h> 16 #include <linux/netdevice.h> 17 18 #include "gsi.h" 19 #include "gsi_reg.h" 20 #include "gsi_private.h" 21 #include "gsi_trans.h" 22 #include "ipa_gsi.h" 23 #include "ipa_data.h" 24 25 /** 26 * DOC: The IPA Generic Software Interface 27 * 28 * The generic software interface (GSI) is an integral component of the IPA, 29 * providing a well-defined communication layer between the AP subsystem 30 * and the IPA core. The modem uses the GSI layer as well. 31 * 32 * -------- --------- 33 * | | | | 34 * | AP +<---. .----+ Modem | 35 * | +--. | | .->+ | 36 * | | | | | | | | 37 * -------- | | | | --------- 38 * v | v | 39 * --+-+---+-+-- 40 * | GSI | 41 * |-----------| 42 * | | 43 * | IPA | 44 * | | 45 * ------------- 46 * 47 * In the above diagram, the AP and Modem represent "execution environments" 48 * (EEs), which are independent operating environments that use the IPA for 49 * data transfer. 50 * 51 * Each EE uses a set of unidirectional GSI "channels," which allow transfer 52 * of data to or from the IPA. A channel is implemented as a ring buffer, 53 * with a DRAM-resident array of "transfer elements" (TREs) available to 54 * describe transfers to or from other EEs through the IPA. A transfer 55 * element can also contain an immediate command, requesting the IPA perform 56 * actions other than data transfer. 57 * 58 * Each TRE refers to a block of data--also located DRAM. After writing one 59 * or more TREs to a channel, the writer (either the IPA or an EE) writes a 60 * doorbell register to inform the receiving side how many elements have 61 * been written. 62 * 63 * Each channel has a GSI "event ring" associated with it. An event ring 64 * is implemented very much like a channel ring, but is always directed from 65 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel 66 * events by adding an entry to the event ring associated with the channel. 67 * The GSI then writes its doorbell for the event ring, causing the target 68 * EE to be interrupted. Each entry in an event ring contains a pointer 69 * to the channel TRE whose completion the event represents. 70 * 71 * Each TRE in a channel ring has a set of flags. One flag indicates whether 72 * the completion of the transfer operation generates an entry (and possibly 73 * an interrupt) in the channel's event ring. Other flags allow transfer 74 * elements to be chained together, forming a single logical transaction. 75 * TRE flags are used to control whether and when interrupts are generated 76 * to signal completion of channel transfers. 77 * 78 * Elements in channel and event rings are completed (or consumed) strictly 79 * in order. Completion of one entry implies the completion of all preceding 80 * entries. A single completion interrupt can therefore communicate the 81 * completion of many transfers. 82 * 83 * Note that all GSI registers are little-endian, which is the assumed 84 * endianness of I/O space accesses. The accessor functions perform byte 85 * swapping if needed (i.e., for a big endian CPU). 86 */ 87 88 /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */ 89 #define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */ 90 91 #define GSI_CMD_TIMEOUT 5 /* seconds */ 92 93 #define GSI_CHANNEL_STOP_RX_RETRIES 10 94 95 #define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */ 96 #define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */ 97 98 #define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */ 99 100 /* An entry in an event ring */ 101 struct gsi_event { 102 __le64 xfer_ptr; 103 __le16 len; 104 u8 reserved1; 105 u8 code; 106 __le16 reserved2; 107 u8 type; 108 u8 chid; 109 }; 110 111 /* Hardware values from the error log register error code field */ 112 enum gsi_err_code { 113 GSI_INVALID_TRE_ERR = 0x1, 114 GSI_OUT_OF_BUFFERS_ERR = 0x2, 115 GSI_OUT_OF_RESOURCES_ERR = 0x3, 116 GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4, 117 GSI_EVT_RING_EMPTY_ERR = 0x5, 118 GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6, 119 GSI_HWO_1_ERR = 0x8, 120 }; 121 122 /* Hardware values from the error log register error type field */ 123 enum gsi_err_type { 124 GSI_ERR_TYPE_GLOB = 0x1, 125 GSI_ERR_TYPE_CHAN = 0x2, 126 GSI_ERR_TYPE_EVT = 0x3, 127 }; 128 129 /* Hardware values used when programming an event ring */ 130 enum gsi_evt_chtype { 131 GSI_EVT_CHTYPE_MHI_EV = 0x0, 132 GSI_EVT_CHTYPE_XHCI_EV = 0x1, 133 GSI_EVT_CHTYPE_GPI_EV = 0x2, 134 GSI_EVT_CHTYPE_XDCI_EV = 0x3, 135 }; 136 137 /* Hardware values used when programming a channel */ 138 enum gsi_channel_protocol { 139 GSI_CHANNEL_PROTOCOL_MHI = 0x0, 140 GSI_CHANNEL_PROTOCOL_XHCI = 0x1, 141 GSI_CHANNEL_PROTOCOL_GPI = 0x2, 142 GSI_CHANNEL_PROTOCOL_XDCI = 0x3, 143 }; 144 145 /* Hardware values representing an event ring immediate command opcode */ 146 enum gsi_evt_cmd_opcode { 147 GSI_EVT_ALLOCATE = 0x0, 148 GSI_EVT_RESET = 0x9, 149 GSI_EVT_DE_ALLOC = 0xa, 150 }; 151 152 /* Hardware values representing a generic immediate command opcode */ 153 enum gsi_generic_cmd_opcode { 154 GSI_GENERIC_HALT_CHANNEL = 0x1, 155 GSI_GENERIC_ALLOCATE_CHANNEL = 0x2, 156 }; 157 158 /* Hardware values representing a channel immediate command opcode */ 159 enum gsi_ch_cmd_opcode { 160 GSI_CH_ALLOCATE = 0x0, 161 GSI_CH_START = 0x1, 162 GSI_CH_STOP = 0x2, 163 GSI_CH_RESET = 0x9, 164 GSI_CH_DE_ALLOC = 0xa, 165 }; 166 167 /** gsi_channel_scratch_gpi - GPI protocol scratch register 168 * @max_outstanding_tre: 169 * Defines the maximum number of TREs allowed in a single transaction 170 * on a channel (in bytes). This determines the amount of prefetch 171 * performed by the hardware. We configure this to equal the size of 172 * the TLV FIFO for the channel. 173 * @outstanding_threshold: 174 * Defines the threshold (in bytes) determining when the sequencer 175 * should update the channel doorbell. We configure this to equal 176 * the size of two TREs. 177 */ 178 struct gsi_channel_scratch_gpi { 179 u64 reserved1; 180 u16 reserved2; 181 u16 max_outstanding_tre; 182 u16 reserved3; 183 u16 outstanding_threshold; 184 }; 185 186 /** gsi_channel_scratch - channel scratch configuration area 187 * 188 * The exact interpretation of this register is protocol-specific. 189 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above. 190 */ 191 union gsi_channel_scratch { 192 struct gsi_channel_scratch_gpi gpi; 193 struct { 194 u32 word1; 195 u32 word2; 196 u32 word3; 197 u32 word4; 198 } data; 199 }; 200 201 /* Check things that can be validated at build time. */ 202 static void gsi_validate_build(void) 203 { 204 /* This is used as a divisor */ 205 BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE); 206 207 /* Code assumes the size of channel and event ring element are 208 * the same (and fixed). Make sure the size of an event ring 209 * element is what's expected. 210 */ 211 BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE); 212 213 /* Hardware requires a 2^n ring size. We ensure the number of 214 * elements in an event ring is a power of 2 elsewhere; this 215 * ensure the elements themselves meet the requirement. 216 */ 217 BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE)); 218 219 /* The channel element size must fit in this field */ 220 BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK)); 221 222 /* The event ring element size must fit in this field */ 223 BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK)); 224 } 225 226 /* Return the channel id associated with a given channel */ 227 static u32 gsi_channel_id(struct gsi_channel *channel) 228 { 229 return channel - &channel->gsi->channel[0]; 230 } 231 232 static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id) 233 { 234 u32 val; 235 236 gsi->event_enable_bitmap |= BIT(evt_ring_id); 237 val = gsi->event_enable_bitmap; 238 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 239 } 240 241 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id) 242 { 243 u32 val; 244 245 gsi->event_enable_bitmap &= ~BIT(evt_ring_id); 246 val = gsi->event_enable_bitmap; 247 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 248 } 249 250 /* Enable all GSI_interrupt types */ 251 static void gsi_irq_enable(struct gsi *gsi) 252 { 253 u32 val; 254 255 /* We don't use inter-EE channel or event interrupts */ 256 val = GSI_CNTXT_TYPE_IRQ_MSK_ALL; 257 val &= ~MSK_INTER_EE_CH_CTRL_FMASK; 258 val &= ~MSK_INTER_EE_EV_CTRL_FMASK; 259 iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); 260 261 val = GENMASK(gsi->channel_count - 1, 0); 262 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 263 264 val = GENMASK(gsi->evt_ring_count - 1, 0); 265 iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 266 267 /* Each IEOB interrupt is enabled (later) as needed by channels */ 268 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 269 270 val = GSI_CNTXT_GLOB_IRQ_ALL; 271 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 272 273 /* Never enable GSI_BREAK_POINT */ 274 val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK; 275 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 276 } 277 278 /* Disable all GSI_interrupt types */ 279 static void gsi_irq_disable(struct gsi *gsi) 280 { 281 iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); 282 iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); 283 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); 284 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); 285 iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); 286 iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); 287 } 288 289 /* Return the virtual address associated with a ring index */ 290 void *gsi_ring_virt(struct gsi_ring *ring, u32 index) 291 { 292 /* Note: index *must* be used modulo the ring count here */ 293 return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE; 294 } 295 296 /* Return the 32-bit DMA address associated with a ring index */ 297 static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index) 298 { 299 return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE; 300 } 301 302 /* Return the ring index of a 32-bit ring offset */ 303 static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset) 304 { 305 return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE; 306 } 307 308 /* Issue a GSI command by writing a value to a register, then wait for 309 * completion to be signaled. Returns true if the command completes 310 * or false if it times out. 311 */ 312 static bool 313 gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion) 314 { 315 reinit_completion(completion); 316 317 iowrite32(val, gsi->virt + reg); 318 319 return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ); 320 } 321 322 /* Return the hardware's notion of the current state of an event ring */ 323 static enum gsi_evt_ring_state 324 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id) 325 { 326 u32 val; 327 328 val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); 329 330 return u32_get_bits(val, EV_CHSTATE_FMASK); 331 } 332 333 /* Issue an event ring command and wait for it to complete */ 334 static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id, 335 enum gsi_evt_cmd_opcode opcode) 336 { 337 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 338 struct completion *completion = &evt_ring->completion; 339 u32 val; 340 341 val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK); 342 val |= u32_encode_bits(opcode, EV_OPCODE_FMASK); 343 344 if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion)) 345 return 0; /* Success! */ 346 347 dev_err(gsi->dev, "GSI command %u to event ring %u timed out " 348 "(state is %u)\n", opcode, evt_ring_id, evt_ring->state); 349 350 return -ETIMEDOUT; 351 } 352 353 /* Allocate an event ring in NOT_ALLOCATED state */ 354 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) 355 { 356 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 357 int ret; 358 359 /* Get initial event ring state */ 360 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); 361 362 if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) 363 return -EINVAL; 364 365 ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); 366 if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { 367 dev_err(gsi->dev, "bad event ring state (%u) after alloc\n", 368 evt_ring->state); 369 ret = -EIO; 370 } 371 372 return ret; 373 } 374 375 /* Reset a GSI event ring in ALLOCATED or ERROR state. */ 376 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) 377 { 378 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 379 enum gsi_evt_ring_state state = evt_ring->state; 380 int ret; 381 382 if (state != GSI_EVT_RING_STATE_ALLOCATED && 383 state != GSI_EVT_RING_STATE_ERROR) { 384 dev_err(gsi->dev, "bad event ring state (%u) before reset\n", 385 evt_ring->state); 386 return; 387 } 388 389 ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); 390 if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) 391 dev_err(gsi->dev, "bad event ring state (%u) after reset\n", 392 evt_ring->state); 393 } 394 395 /* Issue a hardware de-allocation request for an allocated event ring */ 396 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) 397 { 398 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 399 int ret; 400 401 if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { 402 dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n", 403 evt_ring->state); 404 return; 405 } 406 407 ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); 408 if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) 409 dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n", 410 evt_ring->state); 411 } 412 413 /* Fetch the current state of a channel from hardware */ 414 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel) 415 { 416 u32 channel_id = gsi_channel_id(channel); 417 void *virt = channel->gsi->virt; 418 u32 val; 419 420 val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); 421 422 return u32_get_bits(val, CHSTATE_FMASK); 423 } 424 425 /* Issue a channel command and wait for it to complete */ 426 static int 427 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) 428 { 429 struct completion *completion = &channel->completion; 430 u32 channel_id = gsi_channel_id(channel); 431 struct gsi *gsi = channel->gsi; 432 u32 val; 433 434 val = u32_encode_bits(channel_id, CH_CHID_FMASK); 435 val |= u32_encode_bits(opcode, CH_OPCODE_FMASK); 436 437 if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion)) 438 return 0; /* Success! */ 439 440 dev_err(gsi->dev, 441 "GSI command %u to channel %u timed out (state is %u)\n", 442 opcode, channel_id, gsi_channel_state(channel)); 443 444 return -ETIMEDOUT; 445 } 446 447 /* Allocate GSI channel in NOT_ALLOCATED state */ 448 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) 449 { 450 struct gsi_channel *channel = &gsi->channel[channel_id]; 451 enum gsi_channel_state state; 452 int ret; 453 454 /* Get initial channel state */ 455 state = gsi_channel_state(channel); 456 if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) 457 return -EINVAL; 458 459 ret = gsi_channel_command(channel, GSI_CH_ALLOCATE); 460 461 /* Channel state will normally have been updated */ 462 state = gsi_channel_state(channel); 463 if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) { 464 dev_err(gsi->dev, "bad channel state (%u) after alloc\n", 465 state); 466 ret = -EIO; 467 } 468 469 return ret; 470 } 471 472 /* Start an ALLOCATED channel */ 473 static int gsi_channel_start_command(struct gsi_channel *channel) 474 { 475 enum gsi_channel_state state; 476 int ret; 477 478 state = gsi_channel_state(channel); 479 if (state != GSI_CHANNEL_STATE_ALLOCATED && 480 state != GSI_CHANNEL_STATE_STOPPED) 481 return -EINVAL; 482 483 ret = gsi_channel_command(channel, GSI_CH_START); 484 485 /* Channel state will normally have been updated */ 486 state = gsi_channel_state(channel); 487 if (!ret && state != GSI_CHANNEL_STATE_STARTED) { 488 dev_err(channel->gsi->dev, 489 "bad channel state (%u) after start\n", state); 490 ret = -EIO; 491 } 492 493 return ret; 494 } 495 496 /* Stop a GSI channel in STARTED state */ 497 static int gsi_channel_stop_command(struct gsi_channel *channel) 498 { 499 enum gsi_channel_state state; 500 int ret; 501 502 state = gsi_channel_state(channel); 503 if (state != GSI_CHANNEL_STATE_STARTED && 504 state != GSI_CHANNEL_STATE_STOP_IN_PROC) 505 return -EINVAL; 506 507 ret = gsi_channel_command(channel, GSI_CH_STOP); 508 509 /* Channel state will normally have been updated */ 510 state = gsi_channel_state(channel); 511 if (ret || state == GSI_CHANNEL_STATE_STOPPED) 512 return ret; 513 514 /* We may have to try again if stop is in progress */ 515 if (state == GSI_CHANNEL_STATE_STOP_IN_PROC) 516 return -EAGAIN; 517 518 dev_err(channel->gsi->dev, 519 "bad channel state (%u) after stop\n", state); 520 521 return -EIO; 522 } 523 524 /* Reset a GSI channel in ALLOCATED or ERROR state. */ 525 static void gsi_channel_reset_command(struct gsi_channel *channel) 526 { 527 enum gsi_channel_state state; 528 int ret; 529 530 msleep(1); /* A short delay is required before a RESET command */ 531 532 state = gsi_channel_state(channel); 533 if (state != GSI_CHANNEL_STATE_STOPPED && 534 state != GSI_CHANNEL_STATE_ERROR) { 535 dev_err(channel->gsi->dev, 536 "bad channel state (%u) before reset\n", state); 537 return; 538 } 539 540 ret = gsi_channel_command(channel, GSI_CH_RESET); 541 542 /* Channel state will normally have been updated */ 543 state = gsi_channel_state(channel); 544 if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) 545 dev_err(channel->gsi->dev, 546 "bad channel state (%u) after reset\n", state); 547 } 548 549 /* Deallocate an ALLOCATED GSI channel */ 550 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) 551 { 552 struct gsi_channel *channel = &gsi->channel[channel_id]; 553 enum gsi_channel_state state; 554 int ret; 555 556 state = gsi_channel_state(channel); 557 if (state != GSI_CHANNEL_STATE_ALLOCATED) { 558 dev_err(gsi->dev, 559 "bad channel state (%u) before dealloc\n", state); 560 return; 561 } 562 563 ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC); 564 565 /* Channel state will normally have been updated */ 566 state = gsi_channel_state(channel); 567 if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED) 568 dev_err(gsi->dev, 569 "bad channel state (%u) after dealloc\n", state); 570 } 571 572 /* Ring an event ring doorbell, reporting the last entry processed by the AP. 573 * The index argument (modulo the ring count) is the first unfilled entry, so 574 * we supply one less than that with the doorbell. Update the event ring 575 * index field with the value provided. 576 */ 577 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index) 578 { 579 struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring; 580 u32 val; 581 582 ring->index = index; /* Next unused entry */ 583 584 /* Note: index *must* be used modulo the ring count here */ 585 val = gsi_ring_addr(ring, (index - 1) % ring->count); 586 iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id)); 587 } 588 589 /* Program an event ring for use */ 590 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) 591 { 592 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 593 size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE; 594 u32 val; 595 596 val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK); 597 val |= EV_INTYPE_FMASK; 598 val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK); 599 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); 600 601 val = u32_encode_bits(size, EV_R_LENGTH_FMASK); 602 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id)); 603 604 /* The context 2 and 3 registers store the low-order and 605 * high-order 32 bits of the address of the event ring, 606 * respectively. 607 */ 608 val = evt_ring->ring.addr & GENMASK(31, 0); 609 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id)); 610 611 val = evt_ring->ring.addr >> 32; 612 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id)); 613 614 /* Enable interrupt moderation by setting the moderation delay */ 615 val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK); 616 val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */ 617 iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id)); 618 619 /* No MSI write data, and MSI address high and low address is 0 */ 620 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id)); 621 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id)); 622 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id)); 623 624 /* We don't need to get event read pointer updates */ 625 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id)); 626 iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id)); 627 628 /* Finally, tell the hardware we've completed event 0 (arbitrary) */ 629 gsi_evt_ring_doorbell(gsi, evt_ring_id, 0); 630 } 631 632 /* Return the last (most recent) transaction completed on a channel. */ 633 static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel) 634 { 635 struct gsi_trans_info *trans_info = &channel->trans_info; 636 struct gsi_trans *trans; 637 638 spin_lock_bh(&trans_info->spinlock); 639 640 if (!list_empty(&trans_info->complete)) 641 trans = list_last_entry(&trans_info->complete, 642 struct gsi_trans, links); 643 else if (!list_empty(&trans_info->polled)) 644 trans = list_last_entry(&trans_info->polled, 645 struct gsi_trans, links); 646 else 647 trans = NULL; 648 649 /* Caller will wait for this, so take a reference */ 650 if (trans) 651 refcount_inc(&trans->refcount); 652 653 spin_unlock_bh(&trans_info->spinlock); 654 655 return trans; 656 } 657 658 /* Wait for transaction activity on a channel to complete */ 659 static void gsi_channel_trans_quiesce(struct gsi_channel *channel) 660 { 661 struct gsi_trans *trans; 662 663 /* Get the last transaction, and wait for it to complete */ 664 trans = gsi_channel_trans_last(channel); 665 if (trans) { 666 wait_for_completion(&trans->completion); 667 gsi_trans_free(trans); 668 } 669 } 670 671 /* Stop channel activity. Transactions may not be allocated until thawed. */ 672 static void gsi_channel_freeze(struct gsi_channel *channel) 673 { 674 gsi_channel_trans_quiesce(channel); 675 676 napi_disable(&channel->napi); 677 678 gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id); 679 } 680 681 /* Allow transactions to be used on the channel again. */ 682 static void gsi_channel_thaw(struct gsi_channel *channel) 683 { 684 gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); 685 686 napi_enable(&channel->napi); 687 } 688 689 /* Program a channel for use */ 690 static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) 691 { 692 size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE; 693 u32 channel_id = gsi_channel_id(channel); 694 union gsi_channel_scratch scr = { }; 695 struct gsi_channel_scratch_gpi *gpi; 696 struct gsi *gsi = channel->gsi; 697 u32 wrr_weight = 0; 698 u32 val; 699 700 /* Arbitrarily pick TRE 0 as the first channel element to use */ 701 channel->tre_ring.index = 0; 702 703 /* We program all channels to use GPI protocol */ 704 val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK); 705 if (channel->toward_ipa) 706 val |= CHTYPE_DIR_FMASK; 707 val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK); 708 val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK); 709 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); 710 711 val = u32_encode_bits(size, R_LENGTH_FMASK); 712 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id)); 713 714 /* The context 2 and 3 registers store the low-order and 715 * high-order 32 bits of the address of the channel ring, 716 * respectively. 717 */ 718 val = channel->tre_ring.addr & GENMASK(31, 0); 719 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id)); 720 721 val = channel->tre_ring.addr >> 32; 722 iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id)); 723 724 /* Command channel gets low weighted round-robin priority */ 725 if (channel->command) 726 wrr_weight = field_max(WRR_WEIGHT_FMASK); 727 val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK); 728 729 /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */ 730 731 /* Enable the doorbell engine if requested */ 732 if (doorbell) 733 val |= USE_DB_ENG_FMASK; 734 735 if (!channel->use_prefetch) 736 val |= USE_ESCAPE_BUF_ONLY_FMASK; 737 738 iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id)); 739 740 /* Now update the scratch registers for GPI protocol */ 741 gpi = &scr.gpi; 742 gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) * 743 GSI_RING_ELEMENT_SIZE; 744 gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE; 745 746 val = scr.data.word1; 747 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id)); 748 749 val = scr.data.word2; 750 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id)); 751 752 val = scr.data.word3; 753 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id)); 754 755 /* We must preserve the upper 16 bits of the last scratch register. 756 * The next sequence assumes those bits remain unchanged between the 757 * read and the write. 758 */ 759 val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); 760 val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0)); 761 iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id)); 762 763 /* All done! */ 764 } 765 766 static void gsi_channel_deprogram(struct gsi_channel *channel) 767 { 768 /* Nothing to do */ 769 } 770 771 /* Start an allocated GSI channel */ 772 int gsi_channel_start(struct gsi *gsi, u32 channel_id) 773 { 774 struct gsi_channel *channel = &gsi->channel[channel_id]; 775 int ret; 776 777 mutex_lock(&gsi->mutex); 778 779 ret = gsi_channel_start_command(channel); 780 781 mutex_unlock(&gsi->mutex); 782 783 gsi_channel_thaw(channel); 784 785 return ret; 786 } 787 788 /* Stop a started channel */ 789 int gsi_channel_stop(struct gsi *gsi, u32 channel_id) 790 { 791 struct gsi_channel *channel = &gsi->channel[channel_id]; 792 enum gsi_channel_state state; 793 u32 retries; 794 int ret; 795 796 gsi_channel_freeze(channel); 797 798 /* Channel could have entered STOPPED state since last call if the 799 * STOP command timed out. We won't stop a channel if stopping it 800 * was successful previously (so we still want the freeze above). 801 */ 802 state = gsi_channel_state(channel); 803 if (state == GSI_CHANNEL_STATE_STOPPED) 804 return 0; 805 806 /* RX channels might require a little time to enter STOPPED state */ 807 retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES; 808 809 mutex_lock(&gsi->mutex); 810 811 do { 812 ret = gsi_channel_stop_command(channel); 813 if (ret != -EAGAIN) 814 break; 815 msleep(1); 816 } while (retries--); 817 818 mutex_unlock(&gsi->mutex); 819 820 /* Thaw the channel if we need to retry (or on error) */ 821 if (ret) 822 gsi_channel_thaw(channel); 823 824 return ret; 825 } 826 827 /* Reset and reconfigure a channel (possibly leaving doorbell disabled) */ 828 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy) 829 { 830 struct gsi_channel *channel = &gsi->channel[channel_id]; 831 832 mutex_lock(&gsi->mutex); 833 834 gsi_channel_reset_command(channel); 835 /* Due to a hardware quirk we may need to reset RX channels twice. */ 836 if (legacy && !channel->toward_ipa) 837 gsi_channel_reset_command(channel); 838 839 gsi_channel_program(channel, legacy); 840 gsi_channel_trans_cancel_pending(channel); 841 842 mutex_unlock(&gsi->mutex); 843 } 844 845 /* Stop a STARTED channel for suspend (using stop if requested) */ 846 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop) 847 { 848 struct gsi_channel *channel = &gsi->channel[channel_id]; 849 850 if (stop) 851 return gsi_channel_stop(gsi, channel_id); 852 853 gsi_channel_freeze(channel); 854 855 return 0; 856 } 857 858 /* Resume a suspended channel (starting will be requested if STOPPED) */ 859 int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start) 860 { 861 struct gsi_channel *channel = &gsi->channel[channel_id]; 862 863 if (start) 864 return gsi_channel_start(gsi, channel_id); 865 866 gsi_channel_thaw(channel); 867 868 return 0; 869 } 870 871 /** 872 * gsi_channel_tx_queued() - Report queued TX transfers for a channel 873 * @channel: Channel for which to report 874 * 875 * Report to the network stack the number of bytes and transactions that 876 * have been queued to hardware since last call. This and the next function 877 * supply information used by the network stack for throttling. 878 * 879 * For each channel we track the number of transactions used and bytes of 880 * data those transactions represent. We also track what those values are 881 * each time this function is called. Subtracting the two tells us 882 * the number of bytes and transactions that have been added between 883 * successive calls. 884 * 885 * Calling this each time we ring the channel doorbell allows us to 886 * provide accurate information to the network stack about how much 887 * work we've given the hardware at any point in time. 888 */ 889 void gsi_channel_tx_queued(struct gsi_channel *channel) 890 { 891 u32 trans_count; 892 u32 byte_count; 893 894 byte_count = channel->byte_count - channel->queued_byte_count; 895 trans_count = channel->trans_count - channel->queued_trans_count; 896 channel->queued_byte_count = channel->byte_count; 897 channel->queued_trans_count = channel->trans_count; 898 899 ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel), 900 trans_count, byte_count); 901 } 902 903 /** 904 * gsi_channel_tx_update() - Report completed TX transfers 905 * @channel: Channel that has completed transmitting packets 906 * @trans: Last transation known to be complete 907 * 908 * Compute the number of transactions and bytes that have been transferred 909 * over a TX channel since the given transaction was committed. Report this 910 * information to the network stack. 911 * 912 * At the time a transaction is committed, we record its channel's 913 * committed transaction and byte counts *in the transaction*. 914 * Completions are signaled by the hardware with an interrupt, and 915 * we can determine the latest completed transaction at that time. 916 * 917 * The difference between the byte/transaction count recorded in 918 * the transaction and the count last time we recorded a completion 919 * tells us exactly how much data has been transferred between 920 * completions. 921 * 922 * Calling this each time we learn of a newly-completed transaction 923 * allows us to provide accurate information to the network stack 924 * about how much work has been completed by the hardware at a given 925 * point in time. 926 */ 927 static void 928 gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans) 929 { 930 u64 byte_count = trans->byte_count + trans->len; 931 u64 trans_count = trans->trans_count + 1; 932 933 byte_count -= channel->compl_byte_count; 934 channel->compl_byte_count += byte_count; 935 trans_count -= channel->compl_trans_count; 936 channel->compl_trans_count += trans_count; 937 938 ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel), 939 trans_count, byte_count); 940 } 941 942 /* Channel control interrupt handler */ 943 static void gsi_isr_chan_ctrl(struct gsi *gsi) 944 { 945 u32 channel_mask; 946 947 channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET); 948 iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); 949 950 while (channel_mask) { 951 u32 channel_id = __ffs(channel_mask); 952 struct gsi_channel *channel; 953 954 channel_mask ^= BIT(channel_id); 955 956 channel = &gsi->channel[channel_id]; 957 958 complete(&channel->completion); 959 } 960 } 961 962 /* Event ring control interrupt handler */ 963 static void gsi_isr_evt_ctrl(struct gsi *gsi) 964 { 965 u32 event_mask; 966 967 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET); 968 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); 969 970 while (event_mask) { 971 u32 evt_ring_id = __ffs(event_mask); 972 struct gsi_evt_ring *evt_ring; 973 974 event_mask ^= BIT(evt_ring_id); 975 976 evt_ring = &gsi->evt_ring[evt_ring_id]; 977 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); 978 979 complete(&evt_ring->completion); 980 } 981 } 982 983 /* Global channel error interrupt handler */ 984 static void 985 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) 986 { 987 if (code == GSI_OUT_OF_RESOURCES_ERR) { 988 dev_err(gsi->dev, "channel %u out of resources\n", channel_id); 989 complete(&gsi->channel[channel_id].completion); 990 return; 991 } 992 993 /* Report, but otherwise ignore all other error codes */ 994 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n", 995 channel_id, err_ee, code); 996 } 997 998 /* Global event error interrupt handler */ 999 static void 1000 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code) 1001 { 1002 if (code == GSI_OUT_OF_RESOURCES_ERR) { 1003 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; 1004 u32 channel_id = gsi_channel_id(evt_ring->channel); 1005 1006 complete(&evt_ring->completion); 1007 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", 1008 channel_id); 1009 return; 1010 } 1011 1012 /* Report, but otherwise ignore all other error codes */ 1013 dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n", 1014 evt_ring_id, err_ee, code); 1015 } 1016 1017 /* Global error interrupt handler */ 1018 static void gsi_isr_glob_err(struct gsi *gsi) 1019 { 1020 enum gsi_err_type type; 1021 enum gsi_err_code code; 1022 u32 which; 1023 u32 val; 1024 u32 ee; 1025 1026 /* Get the logged error, then reinitialize the log */ 1027 val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET); 1028 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); 1029 iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET); 1030 1031 ee = u32_get_bits(val, ERR_EE_FMASK); 1032 which = u32_get_bits(val, ERR_VIRT_IDX_FMASK); 1033 type = u32_get_bits(val, ERR_TYPE_FMASK); 1034 code = u32_get_bits(val, ERR_CODE_FMASK); 1035 1036 if (type == GSI_ERR_TYPE_CHAN) 1037 gsi_isr_glob_chan_err(gsi, ee, which, code); 1038 else if (type == GSI_ERR_TYPE_EVT) 1039 gsi_isr_glob_evt_err(gsi, ee, which, code); 1040 else /* type GSI_ERR_TYPE_GLOB should be fatal */ 1041 dev_err(gsi->dev, "unexpected global error 0x%08x\n", type); 1042 } 1043 1044 /* Generic EE interrupt handler */ 1045 static void gsi_isr_gp_int1(struct gsi *gsi) 1046 { 1047 u32 result; 1048 u32 val; 1049 1050 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1051 result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK); 1052 if (result != GENERIC_EE_SUCCESS_FVAL) 1053 dev_err(gsi->dev, "global INT1 generic result %u\n", result); 1054 1055 complete(&gsi->completion); 1056 } 1057 1058 /* Inter-EE interrupt handler */ 1059 static void gsi_isr_glob_ee(struct gsi *gsi) 1060 { 1061 u32 val; 1062 1063 val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET); 1064 1065 if (val & ERROR_INT_FMASK) 1066 gsi_isr_glob_err(gsi); 1067 1068 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET); 1069 1070 val &= ~ERROR_INT_FMASK; 1071 1072 if (val & EN_GP_INT1_FMASK) { 1073 val ^= EN_GP_INT1_FMASK; 1074 gsi_isr_gp_int1(gsi); 1075 } 1076 1077 if (val) 1078 dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val); 1079 } 1080 1081 /* I/O completion interrupt event */ 1082 static void gsi_isr_ieob(struct gsi *gsi) 1083 { 1084 u32 event_mask; 1085 1086 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET); 1087 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET); 1088 1089 while (event_mask) { 1090 u32 evt_ring_id = __ffs(event_mask); 1091 1092 event_mask ^= BIT(evt_ring_id); 1093 1094 gsi_irq_ieob_disable(gsi, evt_ring_id); 1095 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi); 1096 } 1097 } 1098 1099 /* General event interrupts represent serious problems, so report them */ 1100 static void gsi_isr_general(struct gsi *gsi) 1101 { 1102 struct device *dev = gsi->dev; 1103 u32 val; 1104 1105 val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET); 1106 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET); 1107 1108 if (val) 1109 dev_err(dev, "unexpected general interrupt 0x%08x\n", val); 1110 } 1111 1112 /** 1113 * gsi_isr() - Top level GSI interrupt service routine 1114 * @irq: Interrupt number (ignored) 1115 * @dev_id: GSI pointer supplied to request_irq() 1116 * 1117 * This is the main handler function registered for the GSI IRQ. Each type 1118 * of interrupt has a separate handler function that is called from here. 1119 */ 1120 static irqreturn_t gsi_isr(int irq, void *dev_id) 1121 { 1122 struct gsi *gsi = dev_id; 1123 u32 intr_mask; 1124 u32 cnt = 0; 1125 1126 while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) { 1127 /* intr_mask contains bitmask of pending GSI interrupts */ 1128 do { 1129 u32 gsi_intr = BIT(__ffs(intr_mask)); 1130 1131 intr_mask ^= gsi_intr; 1132 1133 switch (gsi_intr) { 1134 case CH_CTRL_FMASK: 1135 gsi_isr_chan_ctrl(gsi); 1136 break; 1137 case EV_CTRL_FMASK: 1138 gsi_isr_evt_ctrl(gsi); 1139 break; 1140 case GLOB_EE_FMASK: 1141 gsi_isr_glob_ee(gsi); 1142 break; 1143 case IEOB_FMASK: 1144 gsi_isr_ieob(gsi); 1145 break; 1146 case GENERAL_FMASK: 1147 gsi_isr_general(gsi); 1148 break; 1149 default: 1150 dev_err(gsi->dev, 1151 "%s: unrecognized type 0x%08x\n", 1152 __func__, gsi_intr); 1153 break; 1154 } 1155 } while (intr_mask); 1156 1157 if (++cnt > GSI_ISR_MAX_ITER) { 1158 dev_err(gsi->dev, "interrupt flood\n"); 1159 break; 1160 } 1161 } 1162 1163 return IRQ_HANDLED; 1164 } 1165 1166 /* Return the transaction associated with a transfer completion event */ 1167 static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, 1168 struct gsi_event *event) 1169 { 1170 u32 tre_offset; 1171 u32 tre_index; 1172 1173 /* Event xfer_ptr records the TRE it's associated with */ 1174 tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0); 1175 tre_index = gsi_ring_index(&channel->tre_ring, tre_offset); 1176 1177 return gsi_channel_trans_mapped(channel, tre_index); 1178 } 1179 1180 /** 1181 * gsi_evt_ring_rx_update() - Record lengths of received data 1182 * @evt_ring: Event ring associated with channel that received packets 1183 * @index: Event index in ring reported by hardware 1184 * 1185 * Events for RX channels contain the actual number of bytes received into 1186 * the buffer. Every event has a transaction associated with it, and here 1187 * we update transactions to record their actual received lengths. 1188 * 1189 * This function is called whenever we learn that the GSI hardware has filled 1190 * new events since the last time we checked. The ring's index field tells 1191 * the first entry in need of processing. The index provided is the 1192 * first *unfilled* event in the ring (following the last filled one). 1193 * 1194 * Events are sequential within the event ring, and transactions are 1195 * sequential within the transaction pool. 1196 * 1197 * Note that @index always refers to an element *within* the event ring. 1198 */ 1199 static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index) 1200 { 1201 struct gsi_channel *channel = evt_ring->channel; 1202 struct gsi_ring *ring = &evt_ring->ring; 1203 struct gsi_trans_info *trans_info; 1204 struct gsi_event *event_done; 1205 struct gsi_event *event; 1206 struct gsi_trans *trans; 1207 u32 byte_count = 0; 1208 u32 old_index; 1209 u32 event_avail; 1210 1211 trans_info = &channel->trans_info; 1212 1213 /* We'll start with the oldest un-processed event. RX channels 1214 * replenish receive buffers in single-TRE transactions, so we 1215 * can just map that event to its transaction. Transactions 1216 * associated with completion events are consecutive. 1217 */ 1218 old_index = ring->index; 1219 event = gsi_ring_virt(ring, old_index); 1220 trans = gsi_event_trans(channel, event); 1221 1222 /* Compute the number of events to process before we wrap, 1223 * and determine when we'll be done processing events. 1224 */ 1225 event_avail = ring->count - old_index % ring->count; 1226 event_done = gsi_ring_virt(ring, index); 1227 do { 1228 trans->len = __le16_to_cpu(event->len); 1229 byte_count += trans->len; 1230 1231 /* Move on to the next event and transaction */ 1232 if (--event_avail) 1233 event++; 1234 else 1235 event = gsi_ring_virt(ring, 0); 1236 trans = gsi_trans_pool_next(&trans_info->pool, trans); 1237 } while (event != event_done); 1238 1239 /* We record RX bytes when they are received */ 1240 channel->byte_count += byte_count; 1241 channel->trans_count++; 1242 } 1243 1244 /* Initialize a ring, including allocating DMA memory for its entries */ 1245 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) 1246 { 1247 size_t size = count * GSI_RING_ELEMENT_SIZE; 1248 struct device *dev = gsi->dev; 1249 dma_addr_t addr; 1250 1251 /* Hardware requires a 2^n ring size, with alignment equal to size */ 1252 ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); 1253 if (ring->virt && addr % size) { 1254 dma_free_coherent(dev, size, ring->virt, ring->addr); 1255 dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", 1256 size); 1257 return -EINVAL; /* Not a good error value, but distinct */ 1258 } else if (!ring->virt) { 1259 return -ENOMEM; 1260 } 1261 ring->addr = addr; 1262 ring->count = count; 1263 1264 return 0; 1265 } 1266 1267 /* Free a previously-allocated ring */ 1268 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring) 1269 { 1270 size_t size = ring->count * GSI_RING_ELEMENT_SIZE; 1271 1272 dma_free_coherent(gsi->dev, size, ring->virt, ring->addr); 1273 } 1274 1275 /* Allocate an available event ring id */ 1276 static int gsi_evt_ring_id_alloc(struct gsi *gsi) 1277 { 1278 u32 evt_ring_id; 1279 1280 if (gsi->event_bitmap == ~0U) { 1281 dev_err(gsi->dev, "event rings exhausted\n"); 1282 return -ENOSPC; 1283 } 1284 1285 evt_ring_id = ffz(gsi->event_bitmap); 1286 gsi->event_bitmap |= BIT(evt_ring_id); 1287 1288 return (int)evt_ring_id; 1289 } 1290 1291 /* Free a previously-allocated event ring id */ 1292 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id) 1293 { 1294 gsi->event_bitmap &= ~BIT(evt_ring_id); 1295 } 1296 1297 /* Ring a channel doorbell, reporting the first un-filled entry */ 1298 void gsi_channel_doorbell(struct gsi_channel *channel) 1299 { 1300 struct gsi_ring *tre_ring = &channel->tre_ring; 1301 u32 channel_id = gsi_channel_id(channel); 1302 struct gsi *gsi = channel->gsi; 1303 u32 val; 1304 1305 /* Note: index *must* be used modulo the ring count here */ 1306 val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count); 1307 iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id)); 1308 } 1309 1310 /* Consult hardware, move any newly completed transactions to completed list */ 1311 static void gsi_channel_update(struct gsi_channel *channel) 1312 { 1313 u32 evt_ring_id = channel->evt_ring_id; 1314 struct gsi *gsi = channel->gsi; 1315 struct gsi_evt_ring *evt_ring; 1316 struct gsi_trans *trans; 1317 struct gsi_ring *ring; 1318 u32 offset; 1319 u32 index; 1320 1321 evt_ring = &gsi->evt_ring[evt_ring_id]; 1322 ring = &evt_ring->ring; 1323 1324 /* See if there's anything new to process; if not, we're done. Note 1325 * that index always refers to an entry *within* the event ring. 1326 */ 1327 offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id); 1328 index = gsi_ring_index(ring, ioread32(gsi->virt + offset)); 1329 if (index == ring->index % ring->count) 1330 return; 1331 1332 /* Get the transaction for the latest completed event. Take a 1333 * reference to keep it from completing before we give the events 1334 * for this and previous transactions back to the hardware. 1335 */ 1336 trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1)); 1337 refcount_inc(&trans->refcount); 1338 1339 /* For RX channels, update each completed transaction with the number 1340 * of bytes that were actually received. For TX channels, report 1341 * the number of transactions and bytes this completion represents 1342 * up the network stack. 1343 */ 1344 if (channel->toward_ipa) 1345 gsi_channel_tx_update(channel, trans); 1346 else 1347 gsi_evt_ring_rx_update(evt_ring, index); 1348 1349 gsi_trans_move_complete(trans); 1350 1351 /* Tell the hardware we've handled these events */ 1352 gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index); 1353 1354 gsi_trans_free(trans); 1355 } 1356 1357 /** 1358 * gsi_channel_poll_one() - Return a single completed transaction on a channel 1359 * @channel: Channel to be polled 1360 * 1361 * @Return: Transaction pointer, or null if none are available 1362 * 1363 * This function returns the first entry on a channel's completed transaction 1364 * list. If that list is empty, the hardware is consulted to determine 1365 * whether any new transactions have completed. If so, they're moved to the 1366 * completed list and the new first entry is returned. If there are no more 1367 * completed transactions, a null pointer is returned. 1368 */ 1369 static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) 1370 { 1371 struct gsi_trans *trans; 1372 1373 /* Get the first transaction from the completed list */ 1374 trans = gsi_channel_trans_complete(channel); 1375 if (!trans) { 1376 /* List is empty; see if there's more to do */ 1377 gsi_channel_update(channel); 1378 trans = gsi_channel_trans_complete(channel); 1379 } 1380 1381 if (trans) 1382 gsi_trans_move_polled(trans); 1383 1384 return trans; 1385 } 1386 1387 /** 1388 * gsi_channel_poll() - NAPI poll function for a channel 1389 * @napi: NAPI structure for the channel 1390 * @budget: Budget supplied by NAPI core 1391 1392 * @Return: Number of items polled (<= budget) 1393 * 1394 * Single transactions completed by hardware are polled until either 1395 * the budget is exhausted, or there are no more. Each transaction 1396 * polled is passed to gsi_trans_complete(), to perform remaining 1397 * completion processing and retire/free the transaction. 1398 */ 1399 static int gsi_channel_poll(struct napi_struct *napi, int budget) 1400 { 1401 struct gsi_channel *channel; 1402 int count = 0; 1403 1404 channel = container_of(napi, struct gsi_channel, napi); 1405 while (count < budget) { 1406 struct gsi_trans *trans; 1407 1408 trans = gsi_channel_poll_one(channel); 1409 if (!trans) 1410 break; 1411 gsi_trans_complete(trans); 1412 } 1413 1414 if (count < budget) { 1415 napi_complete(&channel->napi); 1416 gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); 1417 } 1418 1419 return count; 1420 } 1421 1422 /* The event bitmap represents which event ids are available for allocation. 1423 * Set bits are not available, clear bits can be used. This function 1424 * initializes the map so all events supported by the hardware are available, 1425 * then precludes any reserved events from being allocated. 1426 */ 1427 static u32 gsi_event_bitmap_init(u32 evt_ring_max) 1428 { 1429 u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max); 1430 1431 event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START); 1432 1433 return event_bitmap; 1434 } 1435 1436 /* Setup function for event rings */ 1437 static void gsi_evt_ring_setup(struct gsi *gsi) 1438 { 1439 /* Nothing to do */ 1440 } 1441 1442 /* Inverse of gsi_evt_ring_setup() */ 1443 static void gsi_evt_ring_teardown(struct gsi *gsi) 1444 { 1445 /* Nothing to do */ 1446 } 1447 1448 /* Setup function for a single channel */ 1449 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id, 1450 bool legacy) 1451 { 1452 struct gsi_channel *channel = &gsi->channel[channel_id]; 1453 u32 evt_ring_id = channel->evt_ring_id; 1454 int ret; 1455 1456 if (!channel->gsi) 1457 return 0; /* Ignore uninitialized channels */ 1458 1459 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id); 1460 if (ret) 1461 return ret; 1462 1463 gsi_evt_ring_program(gsi, evt_ring_id); 1464 1465 ret = gsi_channel_alloc_command(gsi, channel_id); 1466 if (ret) 1467 goto err_evt_ring_de_alloc; 1468 1469 gsi_channel_program(channel, legacy); 1470 1471 if (channel->toward_ipa) 1472 netif_tx_napi_add(&gsi->dummy_dev, &channel->napi, 1473 gsi_channel_poll, NAPI_POLL_WEIGHT); 1474 else 1475 netif_napi_add(&gsi->dummy_dev, &channel->napi, 1476 gsi_channel_poll, NAPI_POLL_WEIGHT); 1477 1478 return 0; 1479 1480 err_evt_ring_de_alloc: 1481 /* We've done nothing with the event ring yet so don't reset */ 1482 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); 1483 1484 return ret; 1485 } 1486 1487 /* Inverse of gsi_channel_setup_one() */ 1488 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) 1489 { 1490 struct gsi_channel *channel = &gsi->channel[channel_id]; 1491 u32 evt_ring_id = channel->evt_ring_id; 1492 1493 if (!channel->gsi) 1494 return; /* Ignore uninitialized channels */ 1495 1496 netif_napi_del(&channel->napi); 1497 1498 gsi_channel_deprogram(channel); 1499 gsi_channel_de_alloc_command(gsi, channel_id); 1500 gsi_evt_ring_reset_command(gsi, evt_ring_id); 1501 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); 1502 } 1503 1504 static int gsi_generic_command(struct gsi *gsi, u32 channel_id, 1505 enum gsi_generic_cmd_opcode opcode) 1506 { 1507 struct completion *completion = &gsi->completion; 1508 u32 val; 1509 1510 /* First zero the result code field */ 1511 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1512 val &= ~GENERIC_EE_RESULT_FMASK; 1513 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1514 1515 /* Now issue the command */ 1516 val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK); 1517 val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); 1518 val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); 1519 1520 if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion)) 1521 return 0; /* Success! */ 1522 1523 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", 1524 opcode, channel_id); 1525 1526 return -ETIMEDOUT; 1527 } 1528 1529 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) 1530 { 1531 return gsi_generic_command(gsi, channel_id, 1532 GSI_GENERIC_ALLOCATE_CHANNEL); 1533 } 1534 1535 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) 1536 { 1537 int ret; 1538 1539 ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL); 1540 if (ret) 1541 dev_err(gsi->dev, "error %d halting modem channel %u\n", 1542 ret, channel_id); 1543 } 1544 1545 /* Setup function for channels */ 1546 static int gsi_channel_setup(struct gsi *gsi, bool legacy) 1547 { 1548 u32 channel_id = 0; 1549 u32 mask; 1550 int ret; 1551 1552 gsi_evt_ring_setup(gsi); 1553 gsi_irq_enable(gsi); 1554 1555 mutex_lock(&gsi->mutex); 1556 1557 do { 1558 ret = gsi_channel_setup_one(gsi, channel_id, legacy); 1559 if (ret) 1560 goto err_unwind; 1561 } while (++channel_id < gsi->channel_count); 1562 1563 /* Make sure no channels were defined that hardware does not support */ 1564 while (channel_id < GSI_CHANNEL_COUNT_MAX) { 1565 struct gsi_channel *channel = &gsi->channel[channel_id++]; 1566 1567 if (!channel->gsi) 1568 continue; /* Ignore uninitialized channels */ 1569 1570 dev_err(gsi->dev, "channel %u not supported by hardware\n", 1571 channel_id - 1); 1572 channel_id = gsi->channel_count; 1573 goto err_unwind; 1574 } 1575 1576 /* Allocate modem channels if necessary */ 1577 mask = gsi->modem_channel_bitmap; 1578 while (mask) { 1579 u32 modem_channel_id = __ffs(mask); 1580 1581 ret = gsi_modem_channel_alloc(gsi, modem_channel_id); 1582 if (ret) 1583 goto err_unwind_modem; 1584 1585 /* Clear bit from mask only after success (for unwind) */ 1586 mask ^= BIT(modem_channel_id); 1587 } 1588 1589 mutex_unlock(&gsi->mutex); 1590 1591 return 0; 1592 1593 err_unwind_modem: 1594 /* Compute which modem channels need to be deallocated */ 1595 mask ^= gsi->modem_channel_bitmap; 1596 while (mask) { 1597 u32 channel_id = __fls(mask); 1598 1599 mask ^= BIT(channel_id); 1600 1601 gsi_modem_channel_halt(gsi, channel_id); 1602 } 1603 1604 err_unwind: 1605 while (channel_id--) 1606 gsi_channel_teardown_one(gsi, channel_id); 1607 1608 mutex_unlock(&gsi->mutex); 1609 1610 gsi_irq_disable(gsi); 1611 gsi_evt_ring_teardown(gsi); 1612 1613 return ret; 1614 } 1615 1616 /* Inverse of gsi_channel_setup() */ 1617 static void gsi_channel_teardown(struct gsi *gsi) 1618 { 1619 u32 mask = gsi->modem_channel_bitmap; 1620 u32 channel_id; 1621 1622 mutex_lock(&gsi->mutex); 1623 1624 while (mask) { 1625 u32 channel_id = __fls(mask); 1626 1627 mask ^= BIT(channel_id); 1628 1629 gsi_modem_channel_halt(gsi, channel_id); 1630 } 1631 1632 channel_id = gsi->channel_count - 1; 1633 do 1634 gsi_channel_teardown_one(gsi, channel_id); 1635 while (channel_id--); 1636 1637 mutex_unlock(&gsi->mutex); 1638 1639 gsi_irq_disable(gsi); 1640 gsi_evt_ring_teardown(gsi); 1641 } 1642 1643 /* Setup function for GSI. GSI firmware must be loaded and initialized */ 1644 int gsi_setup(struct gsi *gsi, bool legacy) 1645 { 1646 u32 val; 1647 1648 /* Here is where we first touch the GSI hardware */ 1649 val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET); 1650 if (!(val & ENABLED_FMASK)) { 1651 dev_err(gsi->dev, "GSI has not been enabled\n"); 1652 return -EIO; 1653 } 1654 1655 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); 1656 1657 gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); 1658 if (!gsi->channel_count) { 1659 dev_err(gsi->dev, "GSI reports zero channels supported\n"); 1660 return -EINVAL; 1661 } 1662 if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) { 1663 dev_warn(gsi->dev, 1664 "limiting to %u channels (hardware supports %u)\n", 1665 GSI_CHANNEL_COUNT_MAX, gsi->channel_count); 1666 gsi->channel_count = GSI_CHANNEL_COUNT_MAX; 1667 } 1668 1669 gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); 1670 if (!gsi->evt_ring_count) { 1671 dev_err(gsi->dev, "GSI reports zero event rings supported\n"); 1672 return -EINVAL; 1673 } 1674 if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) { 1675 dev_warn(gsi->dev, 1676 "limiting to %u event rings (hardware supports %u)\n", 1677 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count); 1678 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; 1679 } 1680 1681 /* Initialize the error log */ 1682 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); 1683 1684 /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ 1685 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); 1686 1687 return gsi_channel_setup(gsi, legacy); 1688 } 1689 1690 /* Inverse of gsi_setup() */ 1691 void gsi_teardown(struct gsi *gsi) 1692 { 1693 gsi_channel_teardown(gsi); 1694 } 1695 1696 /* Initialize a channel's event ring */ 1697 static int gsi_channel_evt_ring_init(struct gsi_channel *channel) 1698 { 1699 struct gsi *gsi = channel->gsi; 1700 struct gsi_evt_ring *evt_ring; 1701 int ret; 1702 1703 ret = gsi_evt_ring_id_alloc(gsi); 1704 if (ret < 0) 1705 return ret; 1706 channel->evt_ring_id = ret; 1707 1708 evt_ring = &gsi->evt_ring[channel->evt_ring_id]; 1709 evt_ring->channel = channel; 1710 1711 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count); 1712 if (!ret) 1713 return 0; /* Success! */ 1714 1715 dev_err(gsi->dev, "error %d allocating channel %u event ring\n", 1716 ret, gsi_channel_id(channel)); 1717 1718 gsi_evt_ring_id_free(gsi, channel->evt_ring_id); 1719 1720 return ret; 1721 } 1722 1723 /* Inverse of gsi_channel_evt_ring_init() */ 1724 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel) 1725 { 1726 u32 evt_ring_id = channel->evt_ring_id; 1727 struct gsi *gsi = channel->gsi; 1728 struct gsi_evt_ring *evt_ring; 1729 1730 evt_ring = &gsi->evt_ring[evt_ring_id]; 1731 gsi_ring_free(gsi, &evt_ring->ring); 1732 gsi_evt_ring_id_free(gsi, evt_ring_id); 1733 } 1734 1735 /* Init function for event rings */ 1736 static void gsi_evt_ring_init(struct gsi *gsi) 1737 { 1738 u32 evt_ring_id = 0; 1739 1740 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); 1741 gsi->event_enable_bitmap = 0; 1742 do 1743 init_completion(&gsi->evt_ring[evt_ring_id].completion); 1744 while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX); 1745 } 1746 1747 /* Inverse of gsi_evt_ring_init() */ 1748 static void gsi_evt_ring_exit(struct gsi *gsi) 1749 { 1750 /* Nothing to do */ 1751 } 1752 1753 static bool gsi_channel_data_valid(struct gsi *gsi, 1754 const struct ipa_gsi_endpoint_data *data) 1755 { 1756 #ifdef IPA_VALIDATION 1757 u32 channel_id = data->channel_id; 1758 struct device *dev = gsi->dev; 1759 1760 /* Make sure channel ids are in the range driver supports */ 1761 if (channel_id >= GSI_CHANNEL_COUNT_MAX) { 1762 dev_err(dev, "bad channel id %u (must be less than %u)\n", 1763 channel_id, GSI_CHANNEL_COUNT_MAX); 1764 return false; 1765 } 1766 1767 if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) { 1768 dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id); 1769 return false; 1770 } 1771 1772 if (!data->channel.tlv_count || 1773 data->channel.tlv_count > GSI_TLV_MAX) { 1774 dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n", 1775 channel_id, data->channel.tlv_count, GSI_TLV_MAX); 1776 return false; 1777 } 1778 1779 /* We have to allow at least one maximally-sized transaction to 1780 * be outstanding (which would use tlv_count TREs). Given how 1781 * gsi_channel_tre_max() is computed, tre_count has to be almost 1782 * twice the TLV FIFO size to satisfy this requirement. 1783 */ 1784 if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) { 1785 dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n", 1786 channel_id, data->channel.tlv_count, 1787 data->channel.tre_count); 1788 return false; 1789 } 1790 1791 if (!is_power_of_2(data->channel.tre_count)) { 1792 dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n", 1793 channel_id, data->channel.tre_count); 1794 return false; 1795 } 1796 1797 if (!is_power_of_2(data->channel.event_count)) { 1798 dev_err(dev, "channel %u bad event_count %u (not power of 2)\n", 1799 channel_id, data->channel.event_count); 1800 return false; 1801 } 1802 #endif /* IPA_VALIDATION */ 1803 1804 return true; 1805 } 1806 1807 /* Init function for a single channel */ 1808 static int gsi_channel_init_one(struct gsi *gsi, 1809 const struct ipa_gsi_endpoint_data *data, 1810 bool command, bool prefetch) 1811 { 1812 struct gsi_channel *channel; 1813 u32 tre_count; 1814 int ret; 1815 1816 if (!gsi_channel_data_valid(gsi, data)) 1817 return -EINVAL; 1818 1819 /* Worst case we need an event for every outstanding TRE */ 1820 if (data->channel.tre_count > data->channel.event_count) { 1821 tre_count = data->channel.event_count; 1822 dev_warn(gsi->dev, "channel %u limited to %u TREs\n", 1823 data->channel_id, tre_count); 1824 } else { 1825 tre_count = data->channel.tre_count; 1826 } 1827 1828 channel = &gsi->channel[data->channel_id]; 1829 memset(channel, 0, sizeof(*channel)); 1830 1831 channel->gsi = gsi; 1832 channel->toward_ipa = data->toward_ipa; 1833 channel->command = command; 1834 channel->use_prefetch = command && prefetch; 1835 channel->tlv_count = data->channel.tlv_count; 1836 channel->tre_count = tre_count; 1837 channel->event_count = data->channel.event_count; 1838 init_completion(&channel->completion); 1839 1840 ret = gsi_channel_evt_ring_init(channel); 1841 if (ret) 1842 goto err_clear_gsi; 1843 1844 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); 1845 if (ret) { 1846 dev_err(gsi->dev, "error %d allocating channel %u ring\n", 1847 ret, data->channel_id); 1848 goto err_channel_evt_ring_exit; 1849 } 1850 1851 ret = gsi_channel_trans_init(gsi, data->channel_id); 1852 if (ret) 1853 goto err_ring_free; 1854 1855 if (command) { 1856 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id); 1857 1858 ret = ipa_cmd_pool_init(channel, tre_max); 1859 } 1860 if (!ret) 1861 return 0; /* Success! */ 1862 1863 gsi_channel_trans_exit(channel); 1864 err_ring_free: 1865 gsi_ring_free(gsi, &channel->tre_ring); 1866 err_channel_evt_ring_exit: 1867 gsi_channel_evt_ring_exit(channel); 1868 err_clear_gsi: 1869 channel->gsi = NULL; /* Mark it not (fully) initialized */ 1870 1871 return ret; 1872 } 1873 1874 /* Inverse of gsi_channel_init_one() */ 1875 static void gsi_channel_exit_one(struct gsi_channel *channel) 1876 { 1877 if (!channel->gsi) 1878 return; /* Ignore uninitialized channels */ 1879 1880 if (channel->command) 1881 ipa_cmd_pool_exit(channel); 1882 gsi_channel_trans_exit(channel); 1883 gsi_ring_free(channel->gsi, &channel->tre_ring); 1884 gsi_channel_evt_ring_exit(channel); 1885 } 1886 1887 /* Init function for channels */ 1888 static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count, 1889 const struct ipa_gsi_endpoint_data *data, 1890 bool modem_alloc) 1891 { 1892 int ret = 0; 1893 u32 i; 1894 1895 gsi_evt_ring_init(gsi); 1896 1897 /* The endpoint data array is indexed by endpoint name */ 1898 for (i = 0; i < count; i++) { 1899 bool command = i == IPA_ENDPOINT_AP_COMMAND_TX; 1900 1901 if (ipa_gsi_endpoint_data_empty(&data[i])) 1902 continue; /* Skip over empty slots */ 1903 1904 /* Mark modem channels to be allocated (hardware workaround) */ 1905 if (data[i].ee_id == GSI_EE_MODEM) { 1906 if (modem_alloc) 1907 gsi->modem_channel_bitmap |= 1908 BIT(data[i].channel_id); 1909 continue; 1910 } 1911 1912 ret = gsi_channel_init_one(gsi, &data[i], command, prefetch); 1913 if (ret) 1914 goto err_unwind; 1915 } 1916 1917 return ret; 1918 1919 err_unwind: 1920 while (i--) { 1921 if (ipa_gsi_endpoint_data_empty(&data[i])) 1922 continue; 1923 if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) { 1924 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id); 1925 continue; 1926 } 1927 gsi_channel_exit_one(&gsi->channel[data->channel_id]); 1928 } 1929 gsi_evt_ring_exit(gsi); 1930 1931 return ret; 1932 } 1933 1934 /* Inverse of gsi_channel_init() */ 1935 static void gsi_channel_exit(struct gsi *gsi) 1936 { 1937 u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1; 1938 1939 do 1940 gsi_channel_exit_one(&gsi->channel[channel_id]); 1941 while (channel_id--); 1942 gsi->modem_channel_bitmap = 0; 1943 1944 gsi_evt_ring_exit(gsi); 1945 } 1946 1947 /* Init function for GSI. GSI hardware does not need to be "ready" */ 1948 int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, 1949 u32 count, const struct ipa_gsi_endpoint_data *data, 1950 bool modem_alloc) 1951 { 1952 struct resource *res; 1953 resource_size_t size; 1954 unsigned int irq; 1955 int ret; 1956 1957 gsi_validate_build(); 1958 1959 gsi->dev = &pdev->dev; 1960 1961 /* The GSI layer performs NAPI on all endpoints. NAPI requires a 1962 * network device structure, but the GSI layer does not have one, 1963 * so we must create a dummy network device for this purpose. 1964 */ 1965 init_dummy_netdev(&gsi->dummy_dev); 1966 1967 /* Get the GSI IRQ and request for it to wake the system */ 1968 ret = platform_get_irq_byname(pdev, "gsi"); 1969 if (ret <= 0) { 1970 dev_err(gsi->dev, 1971 "DT error %d getting \"gsi\" IRQ property\n", ret); 1972 return ret ? : -EINVAL; 1973 } 1974 irq = ret; 1975 1976 ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); 1977 if (ret) { 1978 dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret); 1979 return ret; 1980 } 1981 gsi->irq = irq; 1982 1983 ret = enable_irq_wake(gsi->irq); 1984 if (ret) 1985 dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret); 1986 gsi->irq_wake_enabled = !ret; 1987 1988 /* Get GSI memory range and map it */ 1989 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi"); 1990 if (!res) { 1991 dev_err(gsi->dev, 1992 "DT error getting \"gsi\" memory property\n"); 1993 ret = -ENODEV; 1994 goto err_disable_irq_wake; 1995 } 1996 1997 size = resource_size(res); 1998 if (res->start > U32_MAX || size > U32_MAX - res->start) { 1999 dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n"); 2000 ret = -EINVAL; 2001 goto err_disable_irq_wake; 2002 } 2003 2004 gsi->virt = ioremap(res->start, size); 2005 if (!gsi->virt) { 2006 dev_err(gsi->dev, "unable to remap \"gsi\" memory\n"); 2007 ret = -ENOMEM; 2008 goto err_disable_irq_wake; 2009 } 2010 2011 ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc); 2012 if (ret) 2013 goto err_iounmap; 2014 2015 mutex_init(&gsi->mutex); 2016 init_completion(&gsi->completion); 2017 2018 return 0; 2019 2020 err_iounmap: 2021 iounmap(gsi->virt); 2022 err_disable_irq_wake: 2023 if (gsi->irq_wake_enabled) 2024 (void)disable_irq_wake(gsi->irq); 2025 free_irq(gsi->irq, gsi); 2026 2027 return ret; 2028 } 2029 2030 /* Inverse of gsi_init() */ 2031 void gsi_exit(struct gsi *gsi) 2032 { 2033 mutex_destroy(&gsi->mutex); 2034 gsi_channel_exit(gsi); 2035 if (gsi->irq_wake_enabled) 2036 (void)disable_irq_wake(gsi->irq); 2037 free_irq(gsi->irq, gsi); 2038 iounmap(gsi->virt); 2039 } 2040 2041 /* The maximum number of outstanding TREs on a channel. This limits 2042 * a channel's maximum number of transactions outstanding (worst case 2043 * is one TRE per transaction). 2044 * 2045 * The absolute limit is the number of TREs in the channel's TRE ring, 2046 * and in theory we should be able use all of them. But in practice, 2047 * doing that led to the hardware reporting exhaustion of event ring 2048 * slots for writing completion information. So the hardware limit 2049 * would be (tre_count - 1). 2050 * 2051 * We reduce it a bit further though. Transaction resource pools are 2052 * sized to be a little larger than this maximum, to allow resource 2053 * allocations to always be contiguous. The number of entries in a 2054 * TRE ring buffer is a power of 2, and the extra resources in a pool 2055 * tends to nearly double the memory allocated for it. Reducing the 2056 * maximum number of outstanding TREs allows the number of entries in 2057 * a pool to avoid crossing that power-of-2 boundary, and this can 2058 * substantially reduce pool memory requirements. The number we 2059 * reduce it by matches the number added in gsi_trans_pool_init(). 2060 */ 2061 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id) 2062 { 2063 struct gsi_channel *channel = &gsi->channel[channel_id]; 2064 2065 /* Hardware limit is channel->tre_count - 1 */ 2066 return channel->tre_count - (channel->tlv_count - 1); 2067 } 2068 2069 /* Returns the maximum number of TREs in a single transaction for a channel */ 2070 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id) 2071 { 2072 struct gsi_channel *channel = &gsi->channel[channel_id]; 2073 2074 return channel->tlv_count; 2075 } 2076