// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2022 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"

/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
	/* A hard aggregation limit will not be crossed; aggregation closes
	 * if saving incoming data would cross the hard byte limit boundary.
	 *
	 * With a soft limit, aggregation closes *after* the size boundary
	 * has been crossed.  In that case the limit must leave enough space
	 * after that limit to receive a full MTU of data plus overhead.
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The byte limit is encoded as a number of kilobytes */

	return rx_buffer_size / SZ_1K;
}
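/* For example, a hard limit on an 8 KB receive buffer is encoded as 8.
 * With a soft limit the space needed for one more full-sized packet
 * (IPA_MTU plus IPA_RX_BUFFER_OVERHEAD) is subtracted first, so the
 * encoded value is correspondingly smaller; the exact result depends on
 * the skb overhead for the running kernel's page size.
 */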
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		const struct ipa_endpoint_rx *rx_config;
		const struct ipa_reg *reg;
		u32 buffer_size;
		u32 aggr_size;
		u32 limit;

		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		/* Nothing more to check for non-AP RX */
		if (data->ee_id != GSI_EE_AP)
			return true;

		rx_config = &data->endpoint.config.rx;

		/* The buffer size must hold an MTU plus overhead */
		buffer_size = rx_config->buffer_size;
		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
		if (buffer_size < limit) {
			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
				data->endpoint_id, buffer_size, limit);
			return false;
		}

		if (!data->endpoint.config.aggregation) {
			bool result = true;

			/* No aggregation; check for bogus aggregation data */
			if (rx_config->aggr_time_limit) {
				dev_err(dev,
					"time limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_hard_limit) {
				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_close_eof) {
				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			return result;	/* Nothing more to check */
		}

		/* For an endpoint supporting receive aggregation, the byte
		 * limit defines the point at which aggregation closes.  This
		 * check ensures the receive buffer size doesn't result in a
		 * limit that exceeds what's representable in the aggregation
		 * byte limit field.
		 */
		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
					     rx_config->aggr_hard_limit);
		reg = ipa_reg(ipa, ENDP_INIT_AGGR);

		limit = ipa_reg_field_max(reg, BYTE_LIMIT);
		if (aggr_size > limit) {
			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
				data->endpoint_id, aggr_size, limit);

			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	/* Starting with IPA v4.5 sequencer replication is obsolete */
	if (ipa->version >= IPA_VERSION_4_5) {
		if (data->endpoint.config.tx.seq_rep_type) {
			dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
				data->endpoint_id);
			return false;
		}
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

/* Validate endpoint configuration data.  Return max defined endpoint ID */
static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;
	u32 max;

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return 0;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return 0;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return 0;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return 0;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return 0;
	}

	max = 0;
	for (name = 0; name < count; name++, dp++) {
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return 0;
		max = max_t(u32, max, dp->endpoint_id);
	}

	return max;
}

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}
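/* Each TRE in the allocated transaction describes one scatter/gather
 * element.  An RX replenish transaction therefore needs just one TRE
 * for its page buffer, while ipa_endpoint_skb_tx() below asks for one
 * TRE for the linear part of an skb plus one per page fragment.
 */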
/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 field_id;
	u32 offset;
	bool state;
	u32 mask;
	u32 val;

	if (endpoint->toward_ipa)
		WARN_ON(ipa->version >= IPA_VERSION_4_2);
	else
		WARN_ON(ipa->version >= IPA_VERSION_4_0);

	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
	val = ioread32(ipa->reg_virt + offset);

	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
	mask = ipa_reg_bit(reg, field_id);

	state = !!(val & mask);

	/* Don't bother if it's already in the requested state */
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* Delay mode should not be used for IPA v4.2+ */
	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
	WARN_ON(!endpoint->toward_ipa);

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 unit = endpoint_id / 32;
	const struct ipa_reg *reg;
	u32 val;

	WARN_ON(!test_bit(endpoint_id, ipa->available));

	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
	val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));

	return !!(val & BIT(endpoint_id % 32));
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 mask = BIT(endpoint_id % 32);
	struct ipa *ipa = endpoint->ipa;
	u32 unit = endpoint_id / 32;
	const struct ipa_reg *reg;

	WARN_ON(!test_bit(endpoint_id, ipa->available));

	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
	iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
}
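/* Both registers above hold one bit per endpoint, 32 endpoints per
 * 32-bit register instance.  Endpoint 35, for example, lives in
 * register unit 1, bit 3 (35 / 32 == 1, 35 % 32 == 3).
 */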
/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->config.aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	WARN_ON(endpoint->toward_ipa);

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Put all modem RX endpoints into suspend mode, and stop transmission
 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id = 0;

	while (endpoint_id < ipa->endpoint_count) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		if (!endpoint->toward_ipa)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
		else if (ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			gsi_modem_channel_flow_control(&ipa->gsi,
						       endpoint->channel_id,
						       enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	struct gsi_trans *trans;
	u32 endpoint_id;
	u32 count;

	/* We need one command per modem TX endpoint, plus the commands
	 * that clear the pipeline.
	 */
	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
		struct ipa_endpoint *endpoint;
		const struct ipa_reg *reg;
		u32 offset;

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		reg = ipa_reg(ipa, ENDP_STATUS);
		offset = ipa_reg_n_offset(reg, endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	enum ipa_cs_offload_en enabled;
	const struct ipa_reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_CFG);
	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->config.checksum) {
		enum ipa_version version = ipa->version;

		if (endpoint->toward_ipa) {
			u32 off;

			/* Checksum header offset is in 4-byte units */
			off = sizeof(struct rmnet_map_header) / sizeof(u32);
			val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);

			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_UL
					: IPA_CS_OFFLOAD_INLINE;
		} else {
			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_DL
					: IPA_CS_OFFLOAD_INLINE;
		}
	} else {
		enabled = IPA_CS_OFFLOAD_NONE;
	}
	val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val;

	if (!endpoint->toward_ipa)
		return;

	reg = ipa_reg(ipa, ENDP_INIT_NAT);
	val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
	u32 header_size = sizeof(struct rmnet_map_header);

	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->config.checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
		header_size += sizeof(struct rmnet_map_v5_csum_header);
	}

	return header_size;
}

/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
static u32 ipa_header_size_encode(enum ipa_version version,
				  const struct ipa_reg *reg, u32 header_size)
{
	u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
	u32 val;

	/* We know field_max can be used as a mask (2^n - 1) */
	val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
	if (version < IPA_VERSION_4_5) {
		WARN_ON(header_size > field_max);
		return val;
	}

	/* IPA v4.5 adds a few more most-significant bits */
	header_size >>= hweight32(field_max);
	WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
	val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);

	return val;
}

/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
				      const struct ipa_reg *reg, u32 offset)
{
	u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
	u32 val;

	/* We know field_max can be used as a mask (2^n - 1) */
	val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
	if (version < IPA_VERSION_4_5) {
		WARN_ON(offset > field_max);
		return val;
	}

	/* IPA v4.5 adds a few more most-significant bits */
	offset >>= hweight32(field_max);
	WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
	val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);

	return val;
}
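/* To illustrate the split encoding used for IPA v4.5+: if the HDR_LEN
 * field were 6 bits wide (field_max 63), a header size of 100 would be
 * programmed as 100 & 63 = 36 in HDR_LEN and 100 >> 6 = 1 in
 * HDR_LEN_MSB.  The actual field widths come from the register
 * definitions and vary by IPA version; 6 bits here is only an
 * illustration.
 */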
/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_HDR);
	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encode(version, reg, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encode(version, reg, off);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);

			val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 pad_align = endpoint->config.rx.pad_align;
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= ipa_reg_bit(reg, HDR_ENDIANNESS);	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
			u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
			u32 off;	/* Field offset within header */

			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Low bits are in the ENDP_INIT_HDR register */
			off >>= hweight32(mask);
			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
	offset = ipa_reg_n_offset(reg, endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->config.qmap)
		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 offset;
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	reg = ipa_reg(ipa, ENDP_INIT_MODE);
	if (endpoint->config.dma_mode) {
		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;

		val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
		val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
	} else {
		val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
	}
	/* All other bits unspecified (and 0) */

	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
	iowrite32(val, ipa->reg_virt + offset);
}

/* For IPA v4.5+, times are expressed using Qtime.  The AP uses one of two
 * pulse generators (0 and 1) to measure elapsed time.  In ipa_qtime_config()
 * they're configured to have granularity 100 usec and 1 msec, respectively.
 *
 * The return value is the positive or negative Qtime value to use to
 * express the (microsecond) time provided.  A positive return value
 * means pulse generator 0 can be used; otherwise use pulse generator 1.
 */
static int ipa_qtime_val(u32 microseconds, u32 max)
{
	u32 val;

	/* Use 100 microsecond granularity if possible */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val <= max)
		return (int)val;

	/* Have to use pulse generator 1 (millisecond granularity) */
	val = DIV_ROUND_CLOSEST(microseconds, 1000);
	WARN_ON(val > max);

	return (int)-val;
}
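/* For example, with max 31 a 500 microsecond period is returned as 5
 * (five 100 usec ticks of pulse generator 0), while 10000 microseconds
 * would not fit (100 > 31) and is returned as -10 (ten 1 msec ticks of
 * pulse generator 1).
 */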
/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
				  u32 microseconds)
{
	u32 max;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if time limit is 0 */

	max = ipa_reg_field_max(reg, TIME_LIMIT);
	if (ipa->version >= IPA_VERSION_4_5) {
		u32 gran_sel;
		int ret;

		/* Compute the Qtime limit value to use */
		ret = ipa_qtime_val(microseconds, max);
		if (ret < 0) {
			val = -ret;
			gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
		} else {
			val = ret;
			gran_sel = 0;
		}

		return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
	}

	/* We program aggregation granularity in ipa_hardware_config() */
	val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
	WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
	     microseconds, max * IPA_AGGR_GRANULARITY);

	return ipa_reg_encode(reg, TIME_LIMIT, val);
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
	if (endpoint->config.aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx *rx_config;
			u32 buffer_size;
			u32 limit;

			rx_config = &endpoint->config.rx;
			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);

			buffer_size = rx_config->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
						 rx_config->aggr_hard_limit);
			val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);

			limit = rx_config->aggr_time_limit;
			val |= aggr_time_limit_encode(ipa, reg, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			if (rx_config->aggr_close_eof)
				val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
		} else {
			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
		/* other fields ignored */
	}

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value representing the timeout period provided
 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
 */
static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
				  u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version >= IPA_VERSION_4_5) {
		u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
		u32 gran_sel;
		int ret;

		/* Compute the Qtime limit value to use */
		ret = ipa_qtime_val(microseconds, max);
		if (ret < 0) {
			val = -ret;
			gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
		} else {
			val = ret;
			gran_sel = 0;
		}

		return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
	}

	/* Use 64 bit arithmetic to avoid overflow */
	rate = ipa_core_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);

	/* We still need the result to fit into the field */
	WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that high bit is included.
	 */
	high = fls(ticks);		/* 1..32 (or warning above) */
	width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = ipa_reg_encode(reg, TIMER_SCALE, scale);
	val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);

	return val;
}
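/* A worked example of the IPA v4.2 base/scale encoding above, assuming
 * (purely for illustration) a 5-bit wide TIMER_BASE_VALUE field: a tick
 * count of 1000 has its highest set bit at position 10, so scale is
 * 10 - 5 = 5; rounding adds 1 << 4 = 16, giving 1016, and the base is
 * 1016 >> 5 = 31.  The hardware then counts 31 << 5 = 992 ticks, close
 * to the requested 1000.  The real field width comes from the register
 * definition.
 */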
/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val;

	/* This should only be changed when HOL_BLOCK_EN is disabled */
	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
	val = hol_block_timer_encode(ipa, reg, microseconds);

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 offset;
	u32 val;

	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
	offset = ipa_reg_n_offset(reg, endpoint_id);
	val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;

	iowrite32(val, ipa->reg_virt + offset);

	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, ipa->reg_virt + offset);
}

/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}

static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 endpoint_id = 0;

	while (endpoint_id < ipa->endpoint_count) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_disable(endpoint);
		ipa_endpoint_init_hol_block_enable(endpoint, 0);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 resource_group = endpoint->config.resource_group;
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val;

	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
	val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	reg = ipa_reg(ipa, ENDP_INIT_SEQ);

	/* Low-order byte configures primary packet processing */
	val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);

	/* Second byte (if supported) configures replicated packet processing */
	if (ipa->version < IPA_VERSION_4_5)
		val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
				      endpoint->config.tx.seq_rep_type);

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > endpoint->skb_frag_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_STATUS);
	if (endpoint->config.status_enable) {
		val |= ipa_reg_bit(reg, STATUS_EN);
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->config.tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= ipa_reg_encode(reg, STATUS_ENDP,
					      status_endpoint_id);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5+)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
	}

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
{
	struct page *page;
	u32 buffer_size;
	u32 offset;
	u32 len;
	int ret;

	buffer_size = endpoint->config.rx.buffer_size;
	page = dev_alloc_pages(get_order(buffer_size));
	if (!page)
		return -ENOMEM;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = buffer_size - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		put_page(page);
	else
		trans->data = page;	/* transaction owns page now */

	return ret;
}
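/* Note that only buffer_size - NET_SKB_PAD bytes of the page are
 * offered to the hardware; the reserved headroom at the start is what
 * later allows ipa_endpoint_skb_build() to hand the whole page to
 * build_skb() and then skb_reserve(NET_SKB_PAD).
 */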
/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

		/* Ring the doorbell if we've got a full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	return;

try_again_later:
	gsi_trans_free(trans);
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);

	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	if (!endpoint->netdev)
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));

	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}
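/* Of the two receive paths above, ipa_endpoint_skb_build() hands the
 * whole page to the stack and is used when status elements are not
 * being parsed; ipa_endpoint_skb_copy() is used by the status-parsing
 * path, where several aggregated packets may share one buffer and each
 * must be copied out individually (see ipa_endpoint_trans_complete()
 * and ipa_endpoint_status_parse() below).
 */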
/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u8_get_bits(status->endp_dst_idx,
				  IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
{
	struct ipa_endpoint *command_endpoint;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = u8_get_bits(status->endp_src_idx,
				  IPA_STATUS_SRC_IDX_FMASK);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 val;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status element.  If the hardware is configured
		 * to pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->config.rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, status)) {
			void *data2;
			u32 extra;
			u32 len2;

			/* Client receives only packet data (no status) */
			data2 = data + sizeof(*status);
			len2 = le16_to_cpu(status->pkt_len);

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	struct page *page;

	if (endpoint->toward_ipa)
		return;

	if (trans->cancelled)
		goto done;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->config.status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
done:
	ipa_endpoint_replenish(endpoint);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			put_page(page);
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	const struct ipa_reg *reg;
	u32 val;

	reg = ipa_reg(ipa, ROUTE);
	/* ROUTE_DIS is 0 */
	val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
	val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
	/* ROUTE_DEF_HDR_OFST is 0 */
	val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
	val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);

	iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}
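/* Both the default route and the fragment default route are pointed at
 * the same endpoint above; "clearing" the default route simply points
 * them back at endpoint 0.
 */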
/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
			endpoint->config.aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
		(void)ipa_endpoint_program_suspend(endpoint, false);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_nat(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa) {
		if (endpoint->config.rx.holb_drop)
			ipa_endpoint_init_hol_block_enable(endpoint, 0);
		else
			ipa_endpoint_init_hol_block_disable(endpoint);
	}
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}
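/* Note that enabling head-of-line block drop with a timer period of 0,
 * as done above for endpoints with holb_drop set, means a blocked RX
 * endpoint drops packets immediately (a 0 period is treated as an
 * immediate timeout by the timer encoding).
 */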
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	__set_bit(endpoint_id, ipa->enabled);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!test_bit(endpoint_id, ipa->enabled))
		return;

	__clear_bit(endpoint_id, endpoint->ipa->enabled);

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}
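/* Suspend quiesces the modem netdev first, then the LAN RX endpoint,
 * and the command TX endpoint last; resume brings them back in the
 * reverse order.
 */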
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	__set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	__clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 endpoint_id;

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 endpoint_id;

	for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available_count = 0;
	bitmap_free(ipa->available);
	ipa->available = NULL;
}
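/* ipa_endpoint_config() below derives the usable endpoint ID space from
 * the FLAVOR_0 register.  With hypothetical field values (illustrative
 * only, not taken from any particular SoC) MAX_CONS_PIPES = 8,
 * MAX_PROD_PIPES = 8 and PROD_LOWEST = 8, the decode would yield:
 *
 *	tx_count = 8;			TX (consumer) endpoints are 0-7
 *	rx_count = 8;			RX (producer) endpoints are ...
 *	rx_base = 8;			... numbered 8-15
 *	limit = rx_base + rx_count;	16; must not exceed IPA_ENDPOINT_MAX
 *
 * and the direction check in that function then requires every defined
 * TX endpoint ID to be below tx_count and every defined RX endpoint ID
 * to be at or above rx_base.
 */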
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_reg *reg;
	u32 endpoint_id;
	u32 tx_count;
	u32 rx_count;
	u32 rx_base;
	u32 limit;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
		if (!ipa->available)
			return -ENOMEM;
		ipa->available_count = IPA_ENDPOINT_MAX;

		bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);

		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number supported by software.
	 */
	reg = ipa_reg(ipa, FLAVOR_0);
	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));

	/* Our RX is an IPA producer; our TX is an IPA consumer. */
	tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
	rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
	rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);

	limit = rx_base + rx_count;
	if (limit > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints, %u > %u\n",
			limit, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}

	/* Allocate and initialize the available endpoint bitmap */
	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
	if (!ipa->available)
		return -ENOMEM;
	ipa->available_count = limit;

	/* Mark all supported RX and TX endpoints as available */
	bitmap_set(ipa->available, 0, tx_count);
	bitmap_set(ipa->available, rx_base, rx_count);

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
		struct ipa_endpoint *endpoint;

		if (endpoint_id >= limit) {
			dev_err(dev, "invalid endpoint id, %u > %u\n",
				endpoint_id, limit - 1);
			goto err_free_bitmap;
		}

		if (!test_bit(endpoint_id, ipa->available)) {
			dev_err(dev, "unavailable endpoint id %u\n",
				endpoint_id);
			goto err_free_bitmap;
		}

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->toward_ipa) {
			if (endpoint_id < tx_count)
				continue;
		} else if (endpoint_id >= rx_base) {
			continue;
		}

		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
		goto err_free_bitmap;
	}

	return 0;

err_free_bitmap:
	ipa_endpoint_deconfig(ipa);

	return -EINVAL;
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->config = data->endpoint.config;

	__set_bit(endpoint->endpoint_id, ipa->defined);
}

static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	__clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 endpoint_id;

	ipa->filtered = 0;

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);

	bitmap_free(ipa->enabled);
	ipa->enabled = NULL;
	bitmap_free(ipa->set_up);
	ipa->set_up = NULL;
	bitmap_free(ipa->defined);
	ipa->defined = NULL;

	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
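/* Endpoint state is tracked in per-IPA bitmaps indexed by endpoint ID:
 * "defined" is set by ipa_endpoint_init_one(), "set_up" by
 * ipa_endpoint_setup_one(), and "enabled" by ipa_endpoint_enable_one(),
 * with the corresponding exit/teardown/disable paths clearing each bit
 * again.  The "available" bitmap managed by ipa_endpoint_config() records
 * which endpoint IDs the hardware supports at all, so the rough
 * progression for an endpoint is:
 *
 *	defined -> (validated against available) -> set_up -> enabled
 */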
/* Initialize endpoint state; the set of endpoints that support filtering
 * is recorded in ipa->filtered.  Returns 0 if successful, or a negative
 * error code.
 */
int ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filtered;

	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	/* Number of endpoints is one more than the maximum ID */
	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
	if (!ipa->endpoint_count)
		return -EINVAL;

	/* Initialize endpoint state bitmaps */
	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->defined)
		return -ENOMEM;

	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->set_up)
		goto err_free_defined;

	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->enabled)
		goto err_free_set_up;

	filtered = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filtered |= BIT(data->endpoint_id);
		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
			ipa->modem_tx_count++;
	}

	/* Make sure the set of filtered endpoints is valid */
	if (!ipa_filtered_valid(ipa, filtered)) {
		ipa_endpoint_exit(ipa);

		return -EINVAL;
	}

	ipa->filtered = filtered;

	return 0;

err_free_set_up:
	bitmap_free(ipa->set_up);
	ipa->set_up = NULL;
err_free_defined:
	bitmap_free(ipa->defined);
	ipa->defined = NULL;

	return -ENOMEM;
}
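/* Illustrative example for ipa_endpoint_init() above (the endpoint
 * numbers are hypothetical, not taken from any real configuration): if
 * the configuration data marks endpoints 0 and 5 as supporting filtering,
 * the loop leaves filtered == BIT(0) | BIT(5); ipa_filtered_valid() must
 * accept that mask, and on success it is saved as ipa->filtered for later
 * use by the filter table code.
 */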