// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_clock.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT			500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)

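/* For reference: the status element above is 32 bytes, and its multi-byte
 * fields are little-endian.  Code below pairs the masks with the
 * {u8,le16,le32}_get_bits() helpers to extract individual fields, for
 * example (illustrative only):
 *
 *	rule_id = le32_get_bits(status->flags1,
 *				IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
 */
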
#ifdef IPA_VALIDATE

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			const struct ipa_gsi_endpoint_data *all_data,
			const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static u32 aggr_byte_limit_max(enum ipa_version version)
{
	if (version < IPA_VERSION_4_5)
		return field_max(aggr_byte_limit_fmask(true));

	return field_max(aggr_byte_limit_fmask(false));
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;
	u32 limit;

	/* Not sure where this constraint comes from... */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check ensures we don't define a receive buffer size
	 * that would exceed what we can represent in the field that
	 * is used to program its size.
	 */
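	/* A worked example, assuming a 5-bit byte-limit field
	 * (field_max = 31) and an IPA_MTU of 1500 bytes: the largest
	 * programmable limit would be 31 * SZ_1K = 31744 bytes, making
	 * limit 31744 + 1500 + overhead, comfortably above the
	 * 8192-byte IPA_RX_BUFFER_SIZE.  The check below would only
	 * fail if the buffer were made much larger (or the field
	 * narrower).
	 */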
	limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
	if (limit < IPA_RX_BUFFER_SIZE) {
		dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
			IPA_RX_BUFFER_SIZE, limit);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 *	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 *	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

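/* For orientation (a sketch, not authoritative): struct rmnet_map_header
 * is a 4-byte QMAP header, with the mux_id at byte offset 1 and the
 * big-endian pkt_len in bytes 2-3.  So the checksum header offset
 * computed above works out to 1 (one 4-byte unit), and the offsets
 * encoded in ipa_endpoint_init_hdr() below are 1 (mux_id) and
 * 2 (pkt_len).
 */
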
/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);
		enum ipa_version version = ipa->version;

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 offset;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			offset = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, offset);

			/* Where IPA will write the length */
			offset = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version == IPA_VERSION_4_5)
				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version == IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->data->qmap && !endpoint->toward_ipa) {
			u32 offset;

			offset = offsetof(struct rmnet_map_header, pkt_len);
			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(offset,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}

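/* Illustrative numbers only: with an 8192-byte receive buffer, an IPA_MTU
 * of 1500 bytes, and a few hundred bytes of standard skb overhead, the
 * computation above leaves roughly 6 KB, so the aggregation byte limit
 * gets programmed as 6.  The exact value depends on PAGE_SIZE and the
 * size of struct skb_shared_info on the running system.
 */
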
/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
	u32 gran_sel;
	u32 fmask;
	u32 val;

	if (version < IPA_VERSION_4_5) {
		/* We set aggregation granularity in ipa_hardware_config() */
		limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);

		return u32_encode_bits(limit, aggr_time_limit_fmask(true));
	}

	/* IPA v4.5 expresses the time limit using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	fmask = aggr_time_limit_fmask(false);
	val = DIV_ROUND_CLOSEST(limit, 100);
	if (val > field_max(fmask)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = AGGR_GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(limit, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, fmask);
}

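/* For example, the 500 microsecond IPA_AGGR_TIME_LIMIT used below encodes
 * as DIV_ROUND_CLOSEST(500, 100) = 5 ticks of pulse generator 0 on IPA
 * v4.5.  Only a limit too large for the time limit field (several
 * milliseconds or more) would select pulse generator 1 and its
 * millisecond ticks.
 */
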
static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
	u32 val = enabled ? 1 : 0;

	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	enum ipa_version version = endpoint->ipa->version;
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			bool close_eof;
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			val |= aggr_byte_limit_encoded(version, limit);

			limit = IPA_AGGR_TIME_LIMIT;
			val |= aggr_time_limit_encoded(version, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			close_eof = endpoint->data->rx.aggr_close_eof;
			val |= aggr_sw_eof_active_encoded(version, close_eof);

			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds.  The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
	u32 gran_sel;
	u32 val;

	/* IPA v4.5 expresses time limits using Qtime.  The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively.  Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val > field_max(TIME_LIMIT_FMASK)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}

/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value that should be written to that register
 * that represents the timeout period provided.  For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version == IPA_VERSION_4_5)
		return hol_block_timer_qtime_val(ipa, microseconds);

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *	ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that the high bit is included.
	 */
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}

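/* A worked example of the v4.2 encoding, assuming a 100 MHz core clock
 * and a 5-bit base field: 1000 microseconds yields ticks = 781.  Then
 * fls(781) = 10, so scale = 10 - 5 = 5, and rounding adds 16 to give
 * 797.  The encoded register value is base 797 >> 5 = 24 with scale 5,
 * which the hardware expands back to 24 << 5 = 768 ticks (within 2%
 * of the requested period).
 */
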
/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Sequencer type is made up of four nibbles */
	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* The second two apply to replicated packets */
	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

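/* For example, if the endpoint's trans_tre_max is 8, an skb needing one
 * TRE for its linear data plus one per fragment can carry at most seven
 * fragments; anything more is first flattened with skb_linearize(), and
 * only if that fails do we return -E2BIG.
 */
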
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to send to hardware
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

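/* A sketch of the replenish accounting above: while replenishing is
 * disabled, completed buffer counts accumulate in replenish_saved;
 * ipa_endpoint_replenish_enable() moves that count into
 * replenish_backlog, and each successful ipa_endpoint_replenish_one()
 * call consumes one unit of backlog by queueing a fresh page to the
 * hardware.  Disabling reverses the transfer, so no credits are lost
 * across a stop/start cycle.
 */
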
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
{
	struct ipa_endpoint *command_endpoint;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = u8_get_bits(status->endp_src_idx,
				  IPA_STATUS_SRC_IDX_FMASK);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 val;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status element.  If the hardware is configured
		 * to pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, status)) {
			void *data2;
			u32 extra;
			u32 len2;

			/* Client receives only packet data (no status) */
			data2 = data + sizeof(*status);
			len2 = le16_to_cpu(status->pkt_len);

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

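/* Worked numbers for the accounting above (illustrative only): with an
 * 8192-byte buffer holding total_len = 7000 bytes of aggregated data,
 * unused is 1192.  A packet consuming len = 3500 bytes of that buffer
 * is charged extra = DIV_ROUND_CLOSEST(1192 * 3500, 7000) = 596 bytes
 * of truesize, so the whole buffer's cost is spread across the skbs
 * copied out of it.
 */
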
/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version == IPA_VERSION_3_5_1 &&
			!endpoint->toward_ipa &&
			endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_program_delay(endpoint, false);
	else
		(void)ipa_endpoint_program_suspend(endpoint, false);
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_cmd_pipeline_clear(ipa);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
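	/* Hypothetical example of the decode below: if FLAVOR_0 reports
	 * PROD_LOWEST = 8 and MAX_PROD_PIPES = 8, RX (producer) endpoints
	 * occupy IDs 8..15 and rx_mask = GENMASK(15, 8); MAX_CONS_PIPES = 8
	 * would make TX (consumer) endpoints 0..7 with tx_mask =
	 * GENMASK(7, 0).
	 */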
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

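/* A note on the bitmask walks used throughout this file: setup and
 * config iterate with __ffs() (lowest set bit first), while teardown
 * and exit use __fls() so endpoints are torn down in the reverse of
 * the order they were brought up.  For example, initialized = 0x0b
 * sets up endpoints 0, 1, 3 and tears them down as 3, 1, 0.
 */
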
/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}