// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_clock.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			const struct ipa_gsi_endpoint_data *all_data,
			const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 *	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 *	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);

			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
			val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* The head-of-line blocking timer is defined as a tick count, where each
 * tick represents 128 cycles of the IPA core clock.  Return the value
 * that should be written to that register that represents the timeout
 * period provided.
 */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *	ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that that high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Sequencer type is made up of four nibbles */
	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* The second two apply to replicated packets */
	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to send to hardware
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version == IPA_VERSION_3_5_1 &&
			!endpoint->toward_ipa &&
			endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_program_delay(endpoint, false);
	else
		(void)ipa_endpoint_program_suspend(endpoint, false);
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_cmd_tag_process(ipa);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}