// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_clock.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_LOG			= 0x10,
	IPA_STATUS_OPCODE_DCMP			= 0x20,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
	/* The meaning of the next value depends on the IP version */
	IPA_STATUS_EXCEPTION_NAT		= 0x40,		/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */

#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
#define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
#define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)

#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)

#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 *	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 *	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
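
/* For reference (assuming the rmnet_map_header layout in <linux/if_rmnet.h>,
 * i.e. a 4-byte header whose second byte is mux_id and whose last two bytes
 * are the big-endian pkt_len):  the offsets programmed above work out to
 * HDR_OFST_METADATA = 1 and HDR_OFST_PKT_SIZE = 2, and HDR_LEN is 4 for a
 * plain QMAP header or 8 when the UL checksum header is also supplied.
 */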

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
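
/* Rough worked example (illustrative only, assuming 4 KB pages, a 1500-byte
 * IPA_MTU and a few hundred bytes of skb overhead):  with the 8192-byte
 * receive buffer defined above, roughly 8192 - (1500 + ~384) = ~6300 bytes
 * remain, so the aggregation byte limit programmed below ends up being
 * about 6 KB.  Exact numbers depend on PAGE_SIZE and the skb layout.
 */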

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);

			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
			val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* The head-of-line blocking timer is defined as a tick count, where each
 * tick represents 128 cycles of the IPA core clock.  Return the value
 * that should be written to that register that represents the timeout
 * period provided.
 */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 just records the tick count */
	if (ipa->version == IPA_VERSION_3_5_1)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that that high bit is included.
	 */
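	/* Illustrative example (the base-field width used here is
	 * hypothetical):  if ticks = 0x12345 (highest set bit is bit 16,
	 * so high = 17) and the base field is 5 bits wide, then
	 * scale = 17 - 5 = 12 and, after rounding, base = ticks >> 12,
	 * so the hardware effectively sees (ticks >> 12) << 12.
	 */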
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Sequencer type is made up of four nibbles */
	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* The second two apply to replicated packets */
	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
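
/* In the encoding above, a purely hypothetical seq_type of 0x1234 would
 * yield HPS_SEQ_TYPE = 0x4 (nibble 0), DPS_SEQ_TYPE = 0x3 (nibble 1),
 * HPS_REP_SEQ_TYPE = 0x2 (nibble 2) and DPS_REP_SEQ_TYPE = 0x1 (nibble 3).
 */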

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}
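
/* Sketch of the receive buffer layout set up above (illustrative; exact
 * tail space depends on the skb_shared_info size):
 *
 *	[ NET_SKB_PAD headroom | IPA data (len bytes max) | skb tail room ]
 *
 * The hardware is only offered IPA_RX_BUFFER_SIZE - NET_SKB_PAD bytes; the
 * aggregation byte limit computed in ipa_aggr_size_kb() is what keeps
 * received data from overlapping the skb_shared_info that build_skb()
 * places at the end of the buffer in ipa_endpoint_skb_build() below.
 */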

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to add to the backlog
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
 * aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; others we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
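		/* Worked example (purely illustrative):  a 1400-byte packet
		 * on an endpoint with no pad alignment and checksum offload
		 * enabled consumes sizeof(*status) + ALIGN(1400, 1) plus the
		 * 8-byte rmnet_map_dl_csum_trailer, i.e. 32 + 1400 + 8 =
		 * 1440 bytes of the receive buffer.
		 */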
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	bool legacy;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	gsi_channel_reset(gsi, endpoint->channel_id, legacy);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	bool legacy;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 *
	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	special = !endpoint->toward_ipa && endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, legacy);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_program_delay(endpoint, false);
	else
		(void)ipa_endpoint_program_suspend(endpoint, false);
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_disable(endpoint);

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, true);

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_cmd_tag_process(ipa);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
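	/* Worked example (values purely illustrative):  if FLAVOR_0 reports
	 * the lowest producer (RX) endpoint as 10 with 8 producer pipes,
	 * then rx_base = 10, max = 18 and rx_mask = GENMASK(17, 10); with
	 * 10 consumer pipes, tx_mask = GENMASK(9, 0), so ipa->available
	 * ends up as 0x0003ffff.
	 */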
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}