1 // SPDX-License-Identifier: GPL-2.0 2 3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 4 * Copyright (C) 2019-2021 Linaro Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <linux/device.h> 9 #include <linux/slab.h> 10 #include <linux/bitfield.h> 11 #include <linux/if_rmnet.h> 12 #include <linux/dma-direction.h> 13 14 #include "gsi.h" 15 #include "gsi_trans.h" 16 #include "ipa.h" 17 #include "ipa_data.h" 18 #include "ipa_endpoint.h" 19 #include "ipa_cmd.h" 20 #include "ipa_mem.h" 21 #include "ipa_modem.h" 22 #include "ipa_table.h" 23 #include "ipa_gsi.h" 24 #include "ipa_clock.h" 25 26 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 27 28 #define IPA_REPLENISH_BATCH 16 29 30 /* RX buffer is 1 page (or a power-of-2 contiguous pages) */ 31 #define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */ 32 33 /* The amount of RX buffer space consumed by standard skb overhead */ 34 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0)) 35 36 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */ 37 #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */ 38 39 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 40 #define IPA_AGGR_TIME_LIMIT 500 /* microseconds */ 41 42 /** enum ipa_status_opcode - status element opcode hardware values */ 43 enum ipa_status_opcode { 44 IPA_STATUS_OPCODE_PACKET = 0x01, 45 IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04, 46 IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08, 47 IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40, 48 }; 49 50 /** enum ipa_status_exception - status element exception type */ 51 enum ipa_status_exception { 52 /* 0 means no exception */ 53 IPA_STATUS_EXCEPTION_DEAGGR = 0x01, 54 }; 55 56 /* Status element provided by hardware */ 57 struct ipa_status { 58 u8 opcode; /* enum ipa_status_opcode */ 59 u8 exception; /* enum ipa_status_exception */ 60 __le16 mask; 61 __le16 pkt_len; 62 u8 endp_src_idx; 63 u8 endp_dst_idx; 64 __le32 metadata; 65 __le32 flags1; 66 __le64 flags2; 67 __le32 flags3; 68 __le32 flags4; 69 }; 70 71 /* Field masks for struct ipa_status structure fields */ 72 #define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4) 73 #define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0) 74 #define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0) 75 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22) 76 #define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16) 77 78 #ifdef IPA_VALIDATE 79 80 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, 81 const struct ipa_gsi_endpoint_data *all_data, 82 const struct ipa_gsi_endpoint_data *data) 83 { 84 const struct ipa_gsi_endpoint_data *other_data; 85 struct device *dev = &ipa->pdev->dev; 86 enum ipa_endpoint_name other_name; 87 88 if (ipa_gsi_endpoint_data_empty(data)) 89 return true; 90 91 if (!data->toward_ipa) { 92 if (data->endpoint.filter_support) { 93 dev_err(dev, "filtering not supported for " 94 "RX endpoint %u\n", 95 data->endpoint_id); 96 return false; 97 } 98 99 return true; /* Nothing more to check for RX */ 100 } 101 102 if (data->endpoint.config.status_enable) { 103 other_name = data->endpoint.config.tx.status_endpoint; 104 if (other_name >= count) { 105 dev_err(dev, "status endpoint name %u out of range " 106 "for endpoint %u\n", 107 other_name, data->endpoint_id); 108 return false; 109 } 110 111 /* Status endpoint must be defined... */ 112 other_data = &all_data[other_name]; 113 if (ipa_gsi_endpoint_data_empty(other_data)) { 114 dev_err(dev, "DMA endpoint name %u undefined " 115 "for endpoint %u\n", 116 other_name, data->endpoint_id); 117 return false; 118 } 119 120 /* ...and has to be an RX endpoint... */ 121 if (other_data->toward_ipa) { 122 dev_err(dev, 123 "status endpoint for endpoint %u not RX\n", 124 data->endpoint_id); 125 return false; 126 } 127 128 /* ...and if it's to be an AP endpoint... */ 129 if (other_data->ee_id == GSI_EE_AP) { 130 /* ...make sure it has status enabled. */ 131 if (!other_data->endpoint.config.status_enable) { 132 dev_err(dev, 133 "status not enabled for endpoint %u\n", 134 other_data->endpoint_id); 135 return false; 136 } 137 } 138 } 139 140 if (data->endpoint.config.dma_mode) { 141 other_name = data->endpoint.config.dma_endpoint; 142 if (other_name >= count) { 143 dev_err(dev, "DMA endpoint name %u out of range " 144 "for endpoint %u\n", 145 other_name, data->endpoint_id); 146 return false; 147 } 148 149 other_data = &all_data[other_name]; 150 if (ipa_gsi_endpoint_data_empty(other_data)) { 151 dev_err(dev, "DMA endpoint name %u undefined " 152 "for endpoint %u\n", 153 other_name, data->endpoint_id); 154 return false; 155 } 156 } 157 158 return true; 159 } 160 161 static u32 aggr_byte_limit_max(enum ipa_version version) 162 { 163 if (version < IPA_VERSION_4_5) 164 return field_max(aggr_byte_limit_fmask(true)); 165 166 return field_max(aggr_byte_limit_fmask(false)); 167 } 168 169 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, 170 const struct ipa_gsi_endpoint_data *data) 171 { 172 const struct ipa_gsi_endpoint_data *dp = data; 173 struct device *dev = &ipa->pdev->dev; 174 enum ipa_endpoint_name name; 175 u32 limit; 176 177 if (count > IPA_ENDPOINT_COUNT) { 178 dev_err(dev, "too many endpoints specified (%u > %u)\n", 179 count, IPA_ENDPOINT_COUNT); 180 return false; 181 } 182 183 /* The aggregation byte limit defines the point at which an 184 * aggregation window will close. It is programmed into the 185 * IPA hardware as a number of KB. We don't use "hard byte 186 * limit" aggregation, which means that we need to supply 187 * enough space in a receive buffer to hold a complete MTU 188 * plus normal skb overhead *after* that aggregation byte 189 * limit has been crossed. 190 * 191 * This check ensures we don't define a receive buffer size 192 * that would exceed what we can represent in the field that 193 * is used to program its size. 194 */ 195 limit = aggr_byte_limit_max(ipa->version) * SZ_1K; 196 limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD; 197 if (limit < IPA_RX_BUFFER_SIZE) { 198 dev_err(dev, "buffer size too big for aggregation (%u > %u)\n", 199 IPA_RX_BUFFER_SIZE, limit); 200 return false; 201 } 202 203 /* Make sure needed endpoints have defined data */ 204 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) { 205 dev_err(dev, "command TX endpoint not defined\n"); 206 return false; 207 } 208 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) { 209 dev_err(dev, "LAN RX endpoint not defined\n"); 210 return false; 211 } 212 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) { 213 dev_err(dev, "AP->modem TX endpoint not defined\n"); 214 return false; 215 } 216 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) { 217 dev_err(dev, "AP<-modem RX endpoint not defined\n"); 218 return false; 219 } 220 221 for (name = 0; name < count; name++, dp++) 222 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) 223 return false; 224 225 return true; 226 } 227 228 #else /* !IPA_VALIDATE */ 229 230 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, 231 const struct ipa_gsi_endpoint_data *data) 232 { 233 return true; 234 } 235 236 #endif /* !IPA_VALIDATE */ 237 238 /* Allocate a transaction to use on a non-command endpoint */ 239 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, 240 u32 tre_count) 241 { 242 struct gsi *gsi = &endpoint->ipa->gsi; 243 u32 channel_id = endpoint->channel_id; 244 enum dma_data_direction direction; 245 246 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 247 248 return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction); 249 } 250 251 /* suspend_delay represents suspend for RX, delay for TX endpoints. 252 * Note that suspend is not supported starting with IPA v4.0. 253 */ 254 static bool 255 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) 256 { 257 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); 258 struct ipa *ipa = endpoint->ipa; 259 bool state; 260 u32 mask; 261 u32 val; 262 263 /* Suspend is not supported for IPA v4.0+. Delay doesn't work 264 * correctly on IPA v4.2. 265 * 266 * if (endpoint->toward_ipa) 267 * assert(ipa->version != IPA_VERSION_4.2); 268 * else 269 * assert(ipa->version < IPA_VERSION_4_0); 270 */ 271 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; 272 273 val = ioread32(ipa->reg_virt + offset); 274 /* Don't bother if it's already in the requested state */ 275 state = !!(val & mask); 276 if (suspend_delay != state) { 277 val ^= mask; 278 iowrite32(val, ipa->reg_virt + offset); 279 } 280 281 return state; 282 } 283 284 /* We currently don't care what the previous state was for delay mode */ 285 static void 286 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) 287 { 288 /* assert(endpoint->toward_ipa); */ 289 290 /* Delay mode doesn't work properly for IPA v4.2 */ 291 if (endpoint->ipa->version != IPA_VERSION_4_2) 292 (void)ipa_endpoint_init_ctrl(endpoint, enable); 293 } 294 295 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) 296 { 297 u32 mask = BIT(endpoint->endpoint_id); 298 struct ipa *ipa = endpoint->ipa; 299 u32 offset; 300 u32 val; 301 302 /* assert(mask & ipa->available); */ 303 offset = ipa_reg_state_aggr_active_offset(ipa->version); 304 val = ioread32(ipa->reg_virt + offset); 305 306 return !!(val & mask); 307 } 308 309 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) 310 { 311 u32 mask = BIT(endpoint->endpoint_id); 312 struct ipa *ipa = endpoint->ipa; 313 314 /* assert(mask & ipa->available); */ 315 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); 316 } 317 318 /** 319 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt 320 * @endpoint: Endpoint on which to emulate a suspend 321 * 322 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended 323 * with an open aggregation frame. This is to work around a hardware 324 * issue in IPA version 3.5.1 where the suspend interrupt will not be 325 * generated when it should be. 326 */ 327 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) 328 { 329 struct ipa *ipa = endpoint->ipa; 330 331 if (!endpoint->data->aggregation) 332 return; 333 334 /* Nothing to do if the endpoint doesn't have aggregation open */ 335 if (!ipa_endpoint_aggr_active(endpoint)) 336 return; 337 338 /* Force close aggregation */ 339 ipa_endpoint_force_close(endpoint); 340 341 ipa_interrupt_simulate_suspend(ipa->interrupt); 342 } 343 344 /* Returns previous suspend state (true means suspend was enabled) */ 345 static bool 346 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) 347 { 348 bool suspended; 349 350 if (endpoint->ipa->version >= IPA_VERSION_4_0) 351 return enable; /* For IPA v4.0+, no change made */ 352 353 /* assert(!endpoint->toward_ipa); */ 354 355 suspended = ipa_endpoint_init_ctrl(endpoint, enable); 356 357 /* A client suspended with an open aggregation frame will not 358 * generate a SUSPEND IPA interrupt. If enabling suspend, have 359 * ipa_endpoint_suspend_aggr() handle this. 360 */ 361 if (enable && !suspended) 362 ipa_endpoint_suspend_aggr(endpoint); 363 364 return suspended; 365 } 366 367 /* Enable or disable delay or suspend mode on all modem endpoints */ 368 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) 369 { 370 u32 endpoint_id; 371 372 /* DELAY mode doesn't work correctly on IPA v4.2 */ 373 if (ipa->version == IPA_VERSION_4_2) 374 return; 375 376 for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) { 377 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; 378 379 if (endpoint->ee_id != GSI_EE_MODEM) 380 continue; 381 382 /* Set TX delay mode or RX suspend mode */ 383 if (endpoint->toward_ipa) 384 ipa_endpoint_program_delay(endpoint, enable); 385 else 386 (void)ipa_endpoint_program_suspend(endpoint, enable); 387 } 388 } 389 390 /* Reset all modem endpoints to use the default exception endpoint */ 391 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) 392 { 393 u32 initialized = ipa->initialized; 394 struct gsi_trans *trans; 395 u32 count; 396 397 /* We need one command per modem TX endpoint. We can get an upper 398 * bound on that by assuming all initialized endpoints are modem->IPA. 399 * That won't happen, and we could be more precise, but this is fine 400 * for now. End the transaction with commands to clear the pipeline. 401 */ 402 count = hweight32(initialized) + ipa_cmd_pipeline_clear_count(); 403 trans = ipa_cmd_trans_alloc(ipa, count); 404 if (!trans) { 405 dev_err(&ipa->pdev->dev, 406 "no transaction to reset modem exception endpoints\n"); 407 return -EBUSY; 408 } 409 410 while (initialized) { 411 u32 endpoint_id = __ffs(initialized); 412 struct ipa_endpoint *endpoint; 413 u32 offset; 414 415 initialized ^= BIT(endpoint_id); 416 417 /* We only reset modem TX endpoints */ 418 endpoint = &ipa->endpoint[endpoint_id]; 419 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) 420 continue; 421 422 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); 423 424 /* Value written is 0, and all bits are updated. That 425 * means status is disabled on the endpoint, and as a 426 * result all other fields in the register are ignored. 427 */ 428 ipa_cmd_register_write_add(trans, offset, 0, ~0, false); 429 } 430 431 ipa_cmd_pipeline_clear_add(trans); 432 433 /* XXX This should have a 1 second timeout */ 434 gsi_trans_commit_wait(trans); 435 436 ipa_cmd_pipeline_clear_wait(ipa); 437 438 return 0; 439 } 440 441 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) 442 { 443 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); 444 enum ipa_cs_offload_en enabled; 445 u32 val = 0; 446 447 /* FRAG_OFFLOAD_EN is 0 */ 448 if (endpoint->data->checksum) { 449 enum ipa_version version = endpoint->ipa->version; 450 451 if (endpoint->toward_ipa) { 452 u32 checksum_offset; 453 454 /* Checksum header offset is in 4-byte units */ 455 checksum_offset = sizeof(struct rmnet_map_header); 456 checksum_offset /= sizeof(u32); 457 val |= u32_encode_bits(checksum_offset, 458 CS_METADATA_HDR_OFFSET_FMASK); 459 460 enabled = version < IPA_VERSION_4_5 461 ? IPA_CS_OFFLOAD_UL 462 : IPA_CS_OFFLOAD_INLINE; 463 } else { 464 enabled = version < IPA_VERSION_4_5 465 ? IPA_CS_OFFLOAD_DL 466 : IPA_CS_OFFLOAD_INLINE; 467 } 468 } else { 469 enabled = IPA_CS_OFFLOAD_NONE; 470 } 471 val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK); 472 /* CS_GEN_QMB_MASTER_SEL is 0 */ 473 474 iowrite32(val, endpoint->ipa->reg_virt + offset); 475 } 476 477 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint) 478 { 479 u32 offset; 480 u32 val; 481 482 if (!endpoint->toward_ipa) 483 return; 484 485 offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id); 486 val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK); 487 488 iowrite32(val, endpoint->ipa->reg_virt + offset); 489 } 490 491 static u32 492 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint) 493 { 494 u32 header_size = sizeof(struct rmnet_map_header); 495 496 /* Without checksum offload, we just have the MAP header */ 497 if (!endpoint->data->checksum) 498 return header_size; 499 500 if (version < IPA_VERSION_4_5) { 501 /* Checksum header inserted for AP TX endpoints only */ 502 if (endpoint->toward_ipa) 503 header_size += sizeof(struct rmnet_map_ul_csum_header); 504 } else { 505 /* Checksum header is used in both directions */ 506 header_size += sizeof(struct rmnet_map_v5_csum_header); 507 } 508 509 return header_size; 510 } 511 512 /** 513 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register 514 * @endpoint: Endpoint pointer 515 * 516 * We program QMAP endpoints so each packet received is preceded by a QMAP 517 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte 518 * packet size field, and we have the IPA hardware populate both for each 519 * received packet. The header is configured (in the HDR_EXT register) 520 * to use big endian format. 521 * 522 * The packet size is written into the QMAP header's pkt_len field. That 523 * location is defined here using the HDR_OFST_PKT_SIZE field. 524 * 525 * The mux_id comes from a 4-byte metadata value supplied with each packet 526 * by the modem. It is *not* a QMAP header, but it does contain the mux_id 527 * value that we want, in its low-order byte. A bitmask defined in the 528 * endpoint's METADATA_MASK register defines which byte within the modem 529 * metadata contains the mux_id. And the OFST_METADATA field programmed 530 * here indicates where the extracted byte should be placed within the QMAP 531 * header. 532 */ 533 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) 534 { 535 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); 536 struct ipa *ipa = endpoint->ipa; 537 u32 val = 0; 538 539 if (endpoint->data->qmap) { 540 enum ipa_version version = ipa->version; 541 size_t header_size; 542 543 header_size = ipa_qmap_header_size(version, endpoint); 544 val = ipa_header_size_encoded(version, header_size); 545 546 /* Define how to fill fields in a received QMAP header */ 547 if (!endpoint->toward_ipa) { 548 u32 offset; /* Field offset within header */ 549 550 /* Where IPA will write the metadata value */ 551 offset = offsetof(struct rmnet_map_header, mux_id); 552 val |= ipa_metadata_offset_encoded(version, offset); 553 554 /* Where IPA will write the length */ 555 offset = offsetof(struct rmnet_map_header, pkt_len); 556 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ 557 if (version >= IPA_VERSION_4_5) 558 offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK); 559 560 val |= HDR_OFST_PKT_SIZE_VALID_FMASK; 561 val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK); 562 } 563 /* For QMAP TX, metadata offset is 0 (modem assumes this) */ 564 val |= HDR_OFST_METADATA_VALID_FMASK; 565 566 /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */ 567 /* HDR_A5_MUX is 0 */ 568 /* HDR_LEN_INC_DEAGG_HDR is 0 */ 569 /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */ 570 } 571 572 iowrite32(val, ipa->reg_virt + offset); 573 } 574 575 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) 576 { 577 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); 578 u32 pad_align = endpoint->data->rx.pad_align; 579 struct ipa *ipa = endpoint->ipa; 580 u32 val = 0; 581 582 val |= HDR_ENDIANNESS_FMASK; /* big endian */ 583 584 /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet 585 * driver assumes this field is meaningful in packets it receives, 586 * and assumes the header's payload length includes that padding. 587 * The RMNet driver does *not* pad packets it sends, however, so 588 * the pad field (although 0) should be ignored. 589 */ 590 if (endpoint->data->qmap && !endpoint->toward_ipa) { 591 val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK; 592 /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ 593 val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK; 594 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ 595 } 596 597 /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */ 598 if (!endpoint->toward_ipa) 599 val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK); 600 601 /* IPA v4.5 adds some most-significant bits to a few fields, 602 * two of which are defined in the HDR (not HDR_EXT) register. 603 */ 604 if (ipa->version >= IPA_VERSION_4_5) { 605 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ 606 if (endpoint->data->qmap && !endpoint->toward_ipa) { 607 u32 offset; 608 609 offset = offsetof(struct rmnet_map_header, pkt_len); 610 offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK); 611 val |= u32_encode_bits(offset, 612 HDR_OFST_PKT_SIZE_MSB_FMASK); 613 /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */ 614 } 615 } 616 iowrite32(val, ipa->reg_virt + offset); 617 } 618 619 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) 620 { 621 u32 endpoint_id = endpoint->endpoint_id; 622 u32 val = 0; 623 u32 offset; 624 625 if (endpoint->toward_ipa) 626 return; /* Register not valid for TX endpoints */ 627 628 offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); 629 630 /* Note that HDR_ENDIANNESS indicates big endian header fields */ 631 if (endpoint->data->qmap) 632 val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); 633 634 iowrite32(val, endpoint->ipa->reg_virt + offset); 635 } 636 637 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) 638 { 639 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); 640 u32 val; 641 642 if (!endpoint->toward_ipa) 643 return; /* Register not valid for RX endpoints */ 644 645 if (endpoint->data->dma_mode) { 646 enum ipa_endpoint_name name = endpoint->data->dma_endpoint; 647 u32 dma_endpoint_id; 648 649 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; 650 651 val = u32_encode_bits(IPA_DMA, MODE_FMASK); 652 val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK); 653 } else { 654 val = u32_encode_bits(IPA_BASIC, MODE_FMASK); 655 } 656 /* All other bits unspecified (and 0) */ 657 658 iowrite32(val, endpoint->ipa->reg_virt + offset); 659 } 660 661 /* Compute the aggregation size value to use for a given buffer size */ 662 static u32 ipa_aggr_size_kb(u32 rx_buffer_size) 663 { 664 /* We don't use "hard byte limit" aggregation, so we define the 665 * aggregation limit such that our buffer has enough space *after* 666 * that limit to receive a full MTU of data, plus overhead. 667 */ 668 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; 669 670 return rx_buffer_size / SZ_1K; 671 } 672 673 /* Encoded values for AGGR endpoint register fields */ 674 static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit) 675 { 676 if (version < IPA_VERSION_4_5) 677 return u32_encode_bits(limit, aggr_byte_limit_fmask(true)); 678 679 return u32_encode_bits(limit, aggr_byte_limit_fmask(false)); 680 } 681 682 /* Encode the aggregation timer limit (microseconds) based on IPA version */ 683 static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit) 684 { 685 u32 gran_sel; 686 u32 fmask; 687 u32 val; 688 689 if (version < IPA_VERSION_4_5) { 690 /* We set aggregation granularity in ipa_hardware_config() */ 691 limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); 692 693 return u32_encode_bits(limit, aggr_time_limit_fmask(true)); 694 } 695 696 /* IPA v4.5 expresses the time limit using Qtime. The AP has 697 * pulse generators 0 and 1 available, which were configured 698 * in ipa_qtime_config() to have granularity 100 usec and 699 * 1 msec, respectively. Use pulse generator 0 if possible, 700 * otherwise fall back to pulse generator 1. 701 */ 702 fmask = aggr_time_limit_fmask(false); 703 val = DIV_ROUND_CLOSEST(limit, 100); 704 if (val > field_max(fmask)) { 705 /* Have to use pulse generator 1 (millisecond granularity) */ 706 gran_sel = AGGR_GRAN_SEL_FMASK; 707 val = DIV_ROUND_CLOSEST(limit, 1000); 708 } else { 709 /* We can use pulse generator 0 (100 usec granularity) */ 710 gran_sel = 0; 711 } 712 713 return gran_sel | u32_encode_bits(val, fmask); 714 } 715 716 static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled) 717 { 718 u32 val = enabled ? 1 : 0; 719 720 if (version < IPA_VERSION_4_5) 721 return u32_encode_bits(val, aggr_sw_eof_active_fmask(true)); 722 723 return u32_encode_bits(val, aggr_sw_eof_active_fmask(false)); 724 } 725 726 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) 727 { 728 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); 729 enum ipa_version version = endpoint->ipa->version; 730 u32 val = 0; 731 732 if (endpoint->data->aggregation) { 733 if (!endpoint->toward_ipa) { 734 bool close_eof; 735 u32 limit; 736 737 val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); 738 val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); 739 740 limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE); 741 val |= aggr_byte_limit_encoded(version, limit); 742 743 limit = IPA_AGGR_TIME_LIMIT; 744 val |= aggr_time_limit_encoded(version, limit); 745 746 /* AGGR_PKT_LIMIT is 0 (unlimited) */ 747 748 close_eof = endpoint->data->rx.aggr_close_eof; 749 val |= aggr_sw_eof_active_encoded(version, close_eof); 750 751 /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */ 752 } else { 753 val |= u32_encode_bits(IPA_ENABLE_DEAGGR, 754 AGGR_EN_FMASK); 755 val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK); 756 /* other fields ignored */ 757 } 758 /* AGGR_FORCE_CLOSE is 0 */ 759 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ 760 } else { 761 val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK); 762 /* other fields ignored */ 763 } 764 765 iowrite32(val, endpoint->ipa->reg_virt + offset); 766 } 767 768 /* Return the Qtime-based head-of-line blocking timer value that 769 * represents the given number of microseconds. The result 770 * includes both the timer value and the selected timer granularity. 771 */ 772 static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds) 773 { 774 u32 gran_sel; 775 u32 val; 776 777 /* IPA v4.5 expresses time limits using Qtime. The AP has 778 * pulse generators 0 and 1 available, which were configured 779 * in ipa_qtime_config() to have granularity 100 usec and 780 * 1 msec, respectively. Use pulse generator 0 if possible, 781 * otherwise fall back to pulse generator 1. 782 */ 783 val = DIV_ROUND_CLOSEST(microseconds, 100); 784 if (val > field_max(TIME_LIMIT_FMASK)) { 785 /* Have to use pulse generator 1 (millisecond granularity) */ 786 gran_sel = GRAN_SEL_FMASK; 787 val = DIV_ROUND_CLOSEST(microseconds, 1000); 788 } else { 789 /* We can use pulse generator 0 (100 usec granularity) */ 790 gran_sel = 0; 791 } 792 793 return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK); 794 } 795 796 /* The head-of-line blocking timer is defined as a tick count. For 797 * IPA version 4.5 the tick count is based on the Qtimer, which is 798 * derived from the 19.2 MHz SoC XO clock. For older IPA versions 799 * each tick represents 128 cycles of the IPA core clock. 800 * 801 * Return the encoded value that should be written to that register 802 * that represents the timeout period provided. For IPA v4.2 this 803 * encodes a base and scale value, while for earlier versions the 804 * value is a simple tick count. 805 */ 806 static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) 807 { 808 u32 width; 809 u32 scale; 810 u64 ticks; 811 u64 rate; 812 u32 high; 813 u32 val; 814 815 if (!microseconds) 816 return 0; /* Nothing to compute if timer period is 0 */ 817 818 if (ipa->version >= IPA_VERSION_4_5) 819 return hol_block_timer_qtime_val(ipa, microseconds); 820 821 /* Use 64 bit arithmetic to avoid overflow... */ 822 rate = ipa_clock_rate(ipa); 823 ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); 824 /* ...but we still need to fit into a 32-bit register */ 825 WARN_ON(ticks > U32_MAX); 826 827 /* IPA v3.5.1 through v4.1 just record the tick count */ 828 if (ipa->version < IPA_VERSION_4_2) 829 return (u32)ticks; 830 831 /* For IPA v4.2, the tick count is represented by base and 832 * scale fields within the 32-bit timer register, where: 833 * ticks = base << scale; 834 * The best precision is achieved when the base value is as 835 * large as possible. Find the highest set bit in the tick 836 * count, and extract the number of bits in the base field 837 * such that high bit is included. 838 */ 839 high = fls(ticks); /* 1..32 */ 840 width = HWEIGHT32(BASE_VALUE_FMASK); 841 scale = high > width ? high - width : 0; 842 if (scale) { 843 /* If we're scaling, round up to get a closer result */ 844 ticks += 1 << (scale - 1); 845 /* High bit was set, so rounding might have affected it */ 846 if (fls(ticks) != high) 847 scale++; 848 } 849 850 val = u32_encode_bits(scale, SCALE_FMASK); 851 val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK); 852 853 return val; 854 } 855 856 /* If microseconds is 0, timeout is immediate */ 857 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, 858 u32 microseconds) 859 { 860 u32 endpoint_id = endpoint->endpoint_id; 861 struct ipa *ipa = endpoint->ipa; 862 u32 offset; 863 u32 val; 864 865 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); 866 val = hol_block_timer_val(ipa, microseconds); 867 iowrite32(val, ipa->reg_virt + offset); 868 } 869 870 static void 871 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) 872 { 873 u32 endpoint_id = endpoint->endpoint_id; 874 u32 offset; 875 u32 val; 876 877 val = enable ? HOL_BLOCK_EN_FMASK : 0; 878 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id); 879 iowrite32(val, endpoint->ipa->reg_virt + offset); 880 } 881 882 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) 883 { 884 u32 i; 885 886 for (i = 0; i < IPA_ENDPOINT_MAX; i++) { 887 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; 888 889 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) 890 continue; 891 892 ipa_endpoint_init_hol_block_timer(endpoint, 0); 893 ipa_endpoint_init_hol_block_enable(endpoint, true); 894 } 895 } 896 897 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) 898 { 899 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); 900 u32 val = 0; 901 902 if (!endpoint->toward_ipa) 903 return; /* Register not valid for RX endpoints */ 904 905 /* DEAGGR_HDR_LEN is 0 */ 906 /* PACKET_OFFSET_VALID is 0 */ 907 /* PACKET_OFFSET_LOCATION is ignored (not valid) */ 908 /* MAX_PACKET_LEN is 0 (not enforced) */ 909 910 iowrite32(val, endpoint->ipa->reg_virt + offset); 911 } 912 913 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) 914 { 915 u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id); 916 struct ipa *ipa = endpoint->ipa; 917 u32 val; 918 919 val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group); 920 iowrite32(val, ipa->reg_virt + offset); 921 } 922 923 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) 924 { 925 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); 926 u32 val = 0; 927 928 if (!endpoint->toward_ipa) 929 return; /* Register not valid for RX endpoints */ 930 931 /* Low-order byte configures primary packet processing */ 932 val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK); 933 934 /* Second byte configures replicated packet processing */ 935 val |= u32_encode_bits(endpoint->data->tx.seq_rep_type, 936 SEQ_REP_TYPE_FMASK); 937 938 iowrite32(val, endpoint->ipa->reg_virt + offset); 939 } 940 941 /** 942 * ipa_endpoint_skb_tx() - Transmit a socket buffer 943 * @endpoint: Endpoint pointer 944 * @skb: Socket buffer to send 945 * 946 * Returns: 0 if successful, or a negative error code 947 */ 948 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb) 949 { 950 struct gsi_trans *trans; 951 u32 nr_frags; 952 int ret; 953 954 /* Make sure source endpoint's TLV FIFO has enough entries to 955 * hold the linear portion of the skb and all its fragments. 956 * If not, see if we can linearize it before giving up. 957 */ 958 nr_frags = skb_shinfo(skb)->nr_frags; 959 if (1 + nr_frags > endpoint->trans_tre_max) { 960 if (skb_linearize(skb)) 961 return -E2BIG; 962 nr_frags = 0; 963 } 964 965 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags); 966 if (!trans) 967 return -EBUSY; 968 969 ret = gsi_trans_skb_add(trans, skb); 970 if (ret) 971 goto err_trans_free; 972 trans->data = skb; /* transaction owns skb now */ 973 974 gsi_trans_commit(trans, !netdev_xmit_more()); 975 976 return 0; 977 978 err_trans_free: 979 gsi_trans_free(trans); 980 981 return -ENOMEM; 982 } 983 984 static void ipa_endpoint_status(struct ipa_endpoint *endpoint) 985 { 986 u32 endpoint_id = endpoint->endpoint_id; 987 struct ipa *ipa = endpoint->ipa; 988 u32 val = 0; 989 u32 offset; 990 991 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); 992 993 if (endpoint->data->status_enable) { 994 val |= STATUS_EN_FMASK; 995 if (endpoint->toward_ipa) { 996 enum ipa_endpoint_name name; 997 u32 status_endpoint_id; 998 999 name = endpoint->data->tx.status_endpoint; 1000 status_endpoint_id = ipa->name_map[name]->endpoint_id; 1001 1002 val |= u32_encode_bits(status_endpoint_id, 1003 STATUS_ENDP_FMASK); 1004 } 1005 /* STATUS_LOCATION is 0, meaning status element precedes 1006 * packet (not present for IPA v4.5) 1007 */ 1008 /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */ 1009 } 1010 1011 iowrite32(val, ipa->reg_virt + offset); 1012 } 1013 1014 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) 1015 { 1016 struct gsi_trans *trans; 1017 bool doorbell = false; 1018 struct page *page; 1019 u32 offset; 1020 u32 len; 1021 int ret; 1022 1023 page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE)); 1024 if (!page) 1025 return -ENOMEM; 1026 1027 trans = ipa_endpoint_trans_alloc(endpoint, 1); 1028 if (!trans) 1029 goto err_free_pages; 1030 1031 /* Offset the buffer to make space for skb headroom */ 1032 offset = NET_SKB_PAD; 1033 len = IPA_RX_BUFFER_SIZE - offset; 1034 1035 ret = gsi_trans_page_add(trans, page, len, offset); 1036 if (ret) 1037 goto err_trans_free; 1038 trans->data = page; /* transaction owns page now */ 1039 1040 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { 1041 doorbell = true; 1042 endpoint->replenish_ready = 0; 1043 } 1044 1045 gsi_trans_commit(trans, doorbell); 1046 1047 return 0; 1048 1049 err_trans_free: 1050 gsi_trans_free(trans); 1051 err_free_pages: 1052 __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); 1053 1054 return -ENOMEM; 1055 } 1056 1057 /** 1058 * ipa_endpoint_replenish() - Replenish endpoint receive buffers 1059 * @endpoint: Endpoint to be replenished 1060 * @add_one: Whether this is replacing a just-consumed buffer 1061 * 1062 * The IPA hardware can hold a fixed number of receive buffers for an RX 1063 * endpoint, based on the number of entries in the underlying channel ring 1064 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many 1065 * more receive buffers can be supplied to the hardware. Replenishing for 1066 * an endpoint can be disabled, in which case requests to replenish a 1067 * buffer are "saved", and transferred to the backlog once it is re-enabled 1068 * again. 1069 */ 1070 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one) 1071 { 1072 struct gsi *gsi; 1073 u32 backlog; 1074 1075 if (!endpoint->replenish_enabled) { 1076 if (add_one) 1077 atomic_inc(&endpoint->replenish_saved); 1078 return; 1079 } 1080 1081 while (atomic_dec_not_zero(&endpoint->replenish_backlog)) 1082 if (ipa_endpoint_replenish_one(endpoint)) 1083 goto try_again_later; 1084 if (add_one) 1085 atomic_inc(&endpoint->replenish_backlog); 1086 1087 return; 1088 1089 try_again_later: 1090 /* The last one didn't succeed, so fix the backlog */ 1091 backlog = atomic_inc_return(&endpoint->replenish_backlog); 1092 1093 if (add_one) 1094 atomic_inc(&endpoint->replenish_backlog); 1095 1096 /* Whenever a receive buffer transaction completes we'll try to 1097 * replenish again. It's unlikely, but if we fail to supply even 1098 * one buffer, nothing will trigger another replenish attempt. 1099 * Receive buffer transactions use one TRE, so schedule work to 1100 * try replenishing again if our backlog is *all* available TREs. 1101 */ 1102 gsi = &endpoint->ipa->gsi; 1103 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) 1104 schedule_delayed_work(&endpoint->replenish_work, 1105 msecs_to_jiffies(1)); 1106 } 1107 1108 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) 1109 { 1110 struct gsi *gsi = &endpoint->ipa->gsi; 1111 u32 max_backlog; 1112 u32 saved; 1113 1114 endpoint->replenish_enabled = true; 1115 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) 1116 atomic_add(saved, &endpoint->replenish_backlog); 1117 1118 /* Start replenishing if hardware currently has no buffers */ 1119 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); 1120 if (atomic_read(&endpoint->replenish_backlog) == max_backlog) 1121 ipa_endpoint_replenish(endpoint, false); 1122 } 1123 1124 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) 1125 { 1126 u32 backlog; 1127 1128 endpoint->replenish_enabled = false; 1129 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) 1130 atomic_add(backlog, &endpoint->replenish_saved); 1131 } 1132 1133 static void ipa_endpoint_replenish_work(struct work_struct *work) 1134 { 1135 struct delayed_work *dwork = to_delayed_work(work); 1136 struct ipa_endpoint *endpoint; 1137 1138 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work); 1139 1140 ipa_endpoint_replenish(endpoint, false); 1141 } 1142 1143 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, 1144 void *data, u32 len, u32 extra) 1145 { 1146 struct sk_buff *skb; 1147 1148 skb = __dev_alloc_skb(len, GFP_ATOMIC); 1149 if (skb) { 1150 skb_put(skb, len); 1151 memcpy(skb->data, data, len); 1152 skb->truesize += extra; 1153 } 1154 1155 /* Now receive it, or drop it if there's no netdev */ 1156 if (endpoint->netdev) 1157 ipa_modem_skb_rx(endpoint->netdev, skb); 1158 else if (skb) 1159 dev_kfree_skb_any(skb); 1160 } 1161 1162 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, 1163 struct page *page, u32 len) 1164 { 1165 struct sk_buff *skb; 1166 1167 /* Nothing to do if there's no netdev */ 1168 if (!endpoint->netdev) 1169 return false; 1170 1171 /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */ 1172 skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE); 1173 if (skb) { 1174 /* Reserve the headroom and account for the data */ 1175 skb_reserve(skb, NET_SKB_PAD); 1176 skb_put(skb, len); 1177 } 1178 1179 /* Receive the buffer (or record drop if unable to build it) */ 1180 ipa_modem_skb_rx(endpoint->netdev, skb); 1181 1182 return skb != NULL; 1183 } 1184 1185 /* The format of a packet status element is the same for several status 1186 * types (opcodes). Other types aren't currently supported. 1187 */ 1188 static bool ipa_status_format_packet(enum ipa_status_opcode opcode) 1189 { 1190 switch (opcode) { 1191 case IPA_STATUS_OPCODE_PACKET: 1192 case IPA_STATUS_OPCODE_DROPPED_PACKET: 1193 case IPA_STATUS_OPCODE_SUSPENDED_PACKET: 1194 case IPA_STATUS_OPCODE_PACKET_2ND_PASS: 1195 return true; 1196 default: 1197 return false; 1198 } 1199 } 1200 1201 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, 1202 const struct ipa_status *status) 1203 { 1204 u32 endpoint_id; 1205 1206 if (!ipa_status_format_packet(status->opcode)) 1207 return true; 1208 if (!status->pkt_len) 1209 return true; 1210 endpoint_id = u8_get_bits(status->endp_dst_idx, 1211 IPA_STATUS_DST_IDX_FMASK); 1212 if (endpoint_id != endpoint->endpoint_id) 1213 return true; 1214 1215 return false; /* Don't skip this packet, process it */ 1216 } 1217 1218 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint, 1219 const struct ipa_status *status) 1220 { 1221 struct ipa_endpoint *command_endpoint; 1222 struct ipa *ipa = endpoint->ipa; 1223 u32 endpoint_id; 1224 1225 if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK)) 1226 return false; /* No valid tag */ 1227 1228 /* The status contains a valid tag. We know the packet was sent to 1229 * this endpoint (already verified by ipa_endpoint_status_skip()). 1230 * If the packet came from the AP->command TX endpoint we know 1231 * this packet was sent as part of the pipeline clear process. 1232 */ 1233 endpoint_id = u8_get_bits(status->endp_src_idx, 1234 IPA_STATUS_SRC_IDX_FMASK); 1235 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; 1236 if (endpoint_id == command_endpoint->endpoint_id) { 1237 complete(&ipa->completion); 1238 } else { 1239 dev_err(&ipa->pdev->dev, 1240 "unexpected tagged packet from endpoint %u\n", 1241 endpoint_id); 1242 } 1243 1244 return true; 1245 } 1246 1247 /* Return whether the status indicates the packet should be dropped */ 1248 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, 1249 const struct ipa_status *status) 1250 { 1251 u32 val; 1252 1253 /* If the status indicates a tagged transfer, we'll drop the packet */ 1254 if (ipa_endpoint_status_tag(endpoint, status)) 1255 return true; 1256 1257 /* Deaggregation exceptions we drop; all other types we consume */ 1258 if (status->exception) 1259 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR; 1260 1261 /* Drop the packet if it fails to match a routing rule; otherwise no */ 1262 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); 1263 1264 return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); 1265 } 1266 1267 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, 1268 struct page *page, u32 total_len) 1269 { 1270 void *data = page_address(page) + NET_SKB_PAD; 1271 u32 unused = IPA_RX_BUFFER_SIZE - total_len; 1272 u32 resid = total_len; 1273 1274 while (resid) { 1275 const struct ipa_status *status = data; 1276 u32 align; 1277 u32 len; 1278 1279 if (resid < sizeof(*status)) { 1280 dev_err(&endpoint->ipa->pdev->dev, 1281 "short message (%u bytes < %zu byte status)\n", 1282 resid, sizeof(*status)); 1283 break; 1284 } 1285 1286 /* Skip over status packets that lack packet data */ 1287 if (ipa_endpoint_status_skip(endpoint, status)) { 1288 data += sizeof(*status); 1289 resid -= sizeof(*status); 1290 continue; 1291 } 1292 1293 /* Compute the amount of buffer space consumed by the packet, 1294 * including the status element. If the hardware is configured 1295 * to pad packet data to an aligned boundary, account for that. 1296 * And if checksum offload is enabled a trailer containing 1297 * computed checksum information will be appended. 1298 */ 1299 align = endpoint->data->rx.pad_align ? : 1; 1300 len = le16_to_cpu(status->pkt_len); 1301 len = sizeof(*status) + ALIGN(len, align); 1302 if (endpoint->data->checksum) 1303 len += sizeof(struct rmnet_map_dl_csum_trailer); 1304 1305 if (!ipa_endpoint_status_drop(endpoint, status)) { 1306 void *data2; 1307 u32 extra; 1308 u32 len2; 1309 1310 /* Client receives only packet data (no status) */ 1311 data2 = data + sizeof(*status); 1312 len2 = le16_to_cpu(status->pkt_len); 1313 1314 /* Have the true size reflect the extra unused space in 1315 * the original receive buffer. Distribute the "cost" 1316 * proportionately across all aggregated packets in the 1317 * buffer. 1318 */ 1319 extra = DIV_ROUND_CLOSEST(unused * len, total_len); 1320 ipa_endpoint_skb_copy(endpoint, data2, len2, extra); 1321 } 1322 1323 /* Consume status and the full packet it describes */ 1324 data += len; 1325 resid -= len; 1326 } 1327 } 1328 1329 /* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */ 1330 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint, 1331 struct gsi_trans *trans) 1332 { 1333 } 1334 1335 /* Complete transaction initiated in ipa_endpoint_replenish_one() */ 1336 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, 1337 struct gsi_trans *trans) 1338 { 1339 struct page *page; 1340 1341 ipa_endpoint_replenish(endpoint, true); 1342 1343 if (trans->cancelled) 1344 return; 1345 1346 /* Parse or build a socket buffer using the actual received length */ 1347 page = trans->data; 1348 if (endpoint->data->status_enable) 1349 ipa_endpoint_status_parse(endpoint, page, trans->len); 1350 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) 1351 trans->data = NULL; /* Pages have been consumed */ 1352 } 1353 1354 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, 1355 struct gsi_trans *trans) 1356 { 1357 if (endpoint->toward_ipa) 1358 ipa_endpoint_tx_complete(endpoint, trans); 1359 else 1360 ipa_endpoint_rx_complete(endpoint, trans); 1361 } 1362 1363 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, 1364 struct gsi_trans *trans) 1365 { 1366 if (endpoint->toward_ipa) { 1367 struct ipa *ipa = endpoint->ipa; 1368 1369 /* Nothing to do for command transactions */ 1370 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { 1371 struct sk_buff *skb = trans->data; 1372 1373 if (skb) 1374 dev_kfree_skb_any(skb); 1375 } 1376 } else { 1377 struct page *page = trans->data; 1378 1379 if (page) 1380 __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); 1381 } 1382 } 1383 1384 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) 1385 { 1386 u32 val; 1387 1388 /* ROUTE_DIS is 0 */ 1389 val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK); 1390 val |= ROUTE_DEF_HDR_TABLE_FMASK; 1391 val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK); 1392 val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK); 1393 val |= ROUTE_DEF_RETAIN_HDR_FMASK; 1394 1395 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); 1396 } 1397 1398 void ipa_endpoint_default_route_clear(struct ipa *ipa) 1399 { 1400 ipa_endpoint_default_route_set(ipa, 0); 1401 } 1402 1403 /** 1404 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active 1405 * @endpoint: Endpoint to be reset 1406 * 1407 * If aggregation is active on an RX endpoint when a reset is performed 1408 * on its underlying GSI channel, a special sequence of actions must be 1409 * taken to ensure the IPA pipeline is properly cleared. 1410 * 1411 * Return: 0 if successful, or a negative error code 1412 */ 1413 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) 1414 { 1415 struct device *dev = &endpoint->ipa->pdev->dev; 1416 struct ipa *ipa = endpoint->ipa; 1417 struct gsi *gsi = &ipa->gsi; 1418 bool suspended = false; 1419 dma_addr_t addr; 1420 u32 retries; 1421 u32 len = 1; 1422 void *virt; 1423 int ret; 1424 1425 virt = kzalloc(len, GFP_KERNEL); 1426 if (!virt) 1427 return -ENOMEM; 1428 1429 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE); 1430 if (dma_mapping_error(dev, addr)) { 1431 ret = -ENOMEM; 1432 goto out_kfree; 1433 } 1434 1435 /* Force close aggregation before issuing the reset */ 1436 ipa_endpoint_force_close(endpoint); 1437 1438 /* Reset and reconfigure the channel with the doorbell engine 1439 * disabled. Then poll until we know aggregation is no longer 1440 * active. We'll re-enable the doorbell (if appropriate) when 1441 * we reset again below. 1442 */ 1443 gsi_channel_reset(gsi, endpoint->channel_id, false); 1444 1445 /* Make sure the channel isn't suspended */ 1446 suspended = ipa_endpoint_program_suspend(endpoint, false); 1447 1448 /* Start channel and do a 1 byte read */ 1449 ret = gsi_channel_start(gsi, endpoint->channel_id); 1450 if (ret) 1451 goto out_suspend_again; 1452 1453 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); 1454 if (ret) 1455 goto err_endpoint_stop; 1456 1457 /* Wait for aggregation to be closed on the channel */ 1458 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX; 1459 do { 1460 if (!ipa_endpoint_aggr_active(endpoint)) 1461 break; 1462 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 1463 } while (retries--); 1464 1465 /* Check one last time */ 1466 if (ipa_endpoint_aggr_active(endpoint)) 1467 dev_err(dev, "endpoint %u still active during reset\n", 1468 endpoint->endpoint_id); 1469 1470 gsi_trans_read_byte_done(gsi, endpoint->channel_id); 1471 1472 ret = gsi_channel_stop(gsi, endpoint->channel_id); 1473 if (ret) 1474 goto out_suspend_again; 1475 1476 /* Finally, reset and reconfigure the channel again (re-enabling 1477 * the doorbell engine if appropriate). Sleep for 1 millisecond to 1478 * complete the channel reset sequence. Finish by suspending the 1479 * channel again (if necessary). 1480 */ 1481 gsi_channel_reset(gsi, endpoint->channel_id, true); 1482 1483 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 1484 1485 goto out_suspend_again; 1486 1487 err_endpoint_stop: 1488 (void)gsi_channel_stop(gsi, endpoint->channel_id); 1489 out_suspend_again: 1490 if (suspended) 1491 (void)ipa_endpoint_program_suspend(endpoint, true); 1492 dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE); 1493 out_kfree: 1494 kfree(virt); 1495 1496 return ret; 1497 } 1498 1499 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) 1500 { 1501 u32 channel_id = endpoint->channel_id; 1502 struct ipa *ipa = endpoint->ipa; 1503 bool special; 1504 int ret = 0; 1505 1506 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation 1507 * is active, we need to handle things specially to recover. 1508 * All other cases just need to reset the underlying GSI channel. 1509 */ 1510 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && 1511 endpoint->data->aggregation; 1512 if (special && ipa_endpoint_aggr_active(endpoint)) 1513 ret = ipa_endpoint_reset_rx_aggr(endpoint); 1514 else 1515 gsi_channel_reset(&ipa->gsi, channel_id, true); 1516 1517 if (ret) 1518 dev_err(&ipa->pdev->dev, 1519 "error %d resetting channel %u for endpoint %u\n", 1520 ret, endpoint->channel_id, endpoint->endpoint_id); 1521 } 1522 1523 static void ipa_endpoint_program(struct ipa_endpoint *endpoint) 1524 { 1525 if (endpoint->toward_ipa) 1526 ipa_endpoint_program_delay(endpoint, false); 1527 else 1528 (void)ipa_endpoint_program_suspend(endpoint, false); 1529 ipa_endpoint_init_cfg(endpoint); 1530 ipa_endpoint_init_nat(endpoint); 1531 ipa_endpoint_init_hdr(endpoint); 1532 ipa_endpoint_init_hdr_ext(endpoint); 1533 ipa_endpoint_init_hdr_metadata_mask(endpoint); 1534 ipa_endpoint_init_mode(endpoint); 1535 ipa_endpoint_init_aggr(endpoint); 1536 ipa_endpoint_init_deaggr(endpoint); 1537 ipa_endpoint_init_rsrc_grp(endpoint); 1538 ipa_endpoint_init_seq(endpoint); 1539 ipa_endpoint_status(endpoint); 1540 } 1541 1542 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) 1543 { 1544 struct ipa *ipa = endpoint->ipa; 1545 struct gsi *gsi = &ipa->gsi; 1546 int ret; 1547 1548 ret = gsi_channel_start(gsi, endpoint->channel_id); 1549 if (ret) { 1550 dev_err(&ipa->pdev->dev, 1551 "error %d starting %cX channel %u for endpoint %u\n", 1552 ret, endpoint->toward_ipa ? 'T' : 'R', 1553 endpoint->channel_id, endpoint->endpoint_id); 1554 return ret; 1555 } 1556 1557 if (!endpoint->toward_ipa) { 1558 ipa_interrupt_suspend_enable(ipa->interrupt, 1559 endpoint->endpoint_id); 1560 ipa_endpoint_replenish_enable(endpoint); 1561 } 1562 1563 ipa->enabled |= BIT(endpoint->endpoint_id); 1564 1565 return 0; 1566 } 1567 1568 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) 1569 { 1570 u32 mask = BIT(endpoint->endpoint_id); 1571 struct ipa *ipa = endpoint->ipa; 1572 struct gsi *gsi = &ipa->gsi; 1573 int ret; 1574 1575 if (!(ipa->enabled & mask)) 1576 return; 1577 1578 ipa->enabled ^= mask; 1579 1580 if (!endpoint->toward_ipa) { 1581 ipa_endpoint_replenish_disable(endpoint); 1582 ipa_interrupt_suspend_disable(ipa->interrupt, 1583 endpoint->endpoint_id); 1584 } 1585 1586 /* Note that if stop fails, the channel's state is not well-defined */ 1587 ret = gsi_channel_stop(gsi, endpoint->channel_id); 1588 if (ret) 1589 dev_err(&ipa->pdev->dev, 1590 "error %d attempting to stop endpoint %u\n", ret, 1591 endpoint->endpoint_id); 1592 } 1593 1594 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) 1595 { 1596 struct device *dev = &endpoint->ipa->pdev->dev; 1597 struct gsi *gsi = &endpoint->ipa->gsi; 1598 bool stop_channel; 1599 int ret; 1600 1601 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) 1602 return; 1603 1604 if (!endpoint->toward_ipa) { 1605 ipa_endpoint_replenish_disable(endpoint); 1606 (void)ipa_endpoint_program_suspend(endpoint, true); 1607 } 1608 1609 /* Starting with IPA v4.0, endpoints are suspended by stopping the 1610 * underlying GSI channel rather than using endpoint suspend mode. 1611 */ 1612 stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0; 1613 ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); 1614 if (ret) 1615 dev_err(dev, "error %d suspending channel %u\n", ret, 1616 endpoint->channel_id); 1617 } 1618 1619 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) 1620 { 1621 struct device *dev = &endpoint->ipa->pdev->dev; 1622 struct gsi *gsi = &endpoint->ipa->gsi; 1623 bool start_channel; 1624 int ret; 1625 1626 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) 1627 return; 1628 1629 if (!endpoint->toward_ipa) 1630 (void)ipa_endpoint_program_suspend(endpoint, false); 1631 1632 /* Starting with IPA v4.0, the underlying GSI channel must be 1633 * restarted for resume. 1634 */ 1635 start_channel = endpoint->ipa->version >= IPA_VERSION_4_0; 1636 ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); 1637 if (ret) 1638 dev_err(dev, "error %d resuming channel %u\n", ret, 1639 endpoint->channel_id); 1640 else if (!endpoint->toward_ipa) 1641 ipa_endpoint_replenish_enable(endpoint); 1642 } 1643 1644 void ipa_endpoint_suspend(struct ipa *ipa) 1645 { 1646 if (!ipa->setup_complete) 1647 return; 1648 1649 if (ipa->modem_netdev) 1650 ipa_modem_suspend(ipa->modem_netdev); 1651 1652 ipa_cmd_pipeline_clear(ipa); 1653 1654 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 1655 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 1656 } 1657 1658 void ipa_endpoint_resume(struct ipa *ipa) 1659 { 1660 if (!ipa->setup_complete) 1661 return; 1662 1663 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 1664 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 1665 1666 if (ipa->modem_netdev) 1667 ipa_modem_resume(ipa->modem_netdev); 1668 } 1669 1670 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) 1671 { 1672 struct gsi *gsi = &endpoint->ipa->gsi; 1673 u32 channel_id = endpoint->channel_id; 1674 1675 /* Only AP endpoints get set up */ 1676 if (endpoint->ee_id != GSI_EE_AP) 1677 return; 1678 1679 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); 1680 if (!endpoint->toward_ipa) { 1681 /* RX transactions require a single TRE, so the maximum 1682 * backlog is the same as the maximum outstanding TREs. 1683 */ 1684 endpoint->replenish_enabled = false; 1685 atomic_set(&endpoint->replenish_saved, 1686 gsi_channel_tre_max(gsi, endpoint->channel_id)); 1687 atomic_set(&endpoint->replenish_backlog, 0); 1688 INIT_DELAYED_WORK(&endpoint->replenish_work, 1689 ipa_endpoint_replenish_work); 1690 } 1691 1692 ipa_endpoint_program(endpoint); 1693 1694 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); 1695 } 1696 1697 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) 1698 { 1699 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); 1700 1701 if (!endpoint->toward_ipa) 1702 cancel_delayed_work_sync(&endpoint->replenish_work); 1703 1704 ipa_endpoint_reset(endpoint); 1705 } 1706 1707 void ipa_endpoint_setup(struct ipa *ipa) 1708 { 1709 u32 initialized = ipa->initialized; 1710 1711 ipa->set_up = 0; 1712 while (initialized) { 1713 u32 endpoint_id = __ffs(initialized); 1714 1715 initialized ^= BIT(endpoint_id); 1716 1717 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); 1718 } 1719 } 1720 1721 void ipa_endpoint_teardown(struct ipa *ipa) 1722 { 1723 u32 set_up = ipa->set_up; 1724 1725 while (set_up) { 1726 u32 endpoint_id = __fls(set_up); 1727 1728 set_up ^= BIT(endpoint_id); 1729 1730 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); 1731 } 1732 ipa->set_up = 0; 1733 } 1734 1735 int ipa_endpoint_config(struct ipa *ipa) 1736 { 1737 struct device *dev = &ipa->pdev->dev; 1738 u32 initialized; 1739 u32 rx_base; 1740 u32 rx_mask; 1741 u32 tx_mask; 1742 int ret = 0; 1743 u32 max; 1744 u32 val; 1745 1746 /* Find out about the endpoints supplied by the hardware, and ensure 1747 * the highest one doesn't exceed the number we support. 1748 */ 1749 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); 1750 1751 /* Our RX is an IPA producer */ 1752 rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK); 1753 max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK); 1754 if (max > IPA_ENDPOINT_MAX) { 1755 dev_err(dev, "too many endpoints (%u > %u)\n", 1756 max, IPA_ENDPOINT_MAX); 1757 return -EINVAL; 1758 } 1759 rx_mask = GENMASK(max - 1, rx_base); 1760 1761 /* Our TX is an IPA consumer */ 1762 max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK); 1763 tx_mask = GENMASK(max - 1, 0); 1764 1765 ipa->available = rx_mask | tx_mask; 1766 1767 /* Check for initialized endpoints not supported by the hardware */ 1768 if (ipa->initialized & ~ipa->available) { 1769 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", 1770 ipa->initialized & ~ipa->available); 1771 ret = -EINVAL; /* Report other errors too */ 1772 } 1773 1774 initialized = ipa->initialized; 1775 while (initialized) { 1776 u32 endpoint_id = __ffs(initialized); 1777 struct ipa_endpoint *endpoint; 1778 1779 initialized ^= BIT(endpoint_id); 1780 1781 /* Make sure it's pointing in the right direction */ 1782 endpoint = &ipa->endpoint[endpoint_id]; 1783 if ((endpoint_id < rx_base) != endpoint->toward_ipa) { 1784 dev_err(dev, "endpoint id %u wrong direction\n", 1785 endpoint_id); 1786 ret = -EINVAL; 1787 } 1788 } 1789 1790 return ret; 1791 } 1792 1793 void ipa_endpoint_deconfig(struct ipa *ipa) 1794 { 1795 ipa->available = 0; /* Nothing more to do */ 1796 } 1797 1798 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, 1799 const struct ipa_gsi_endpoint_data *data) 1800 { 1801 struct ipa_endpoint *endpoint; 1802 1803 endpoint = &ipa->endpoint[data->endpoint_id]; 1804 1805 if (data->ee_id == GSI_EE_AP) 1806 ipa->channel_map[data->channel_id] = endpoint; 1807 ipa->name_map[name] = endpoint; 1808 1809 endpoint->ipa = ipa; 1810 endpoint->ee_id = data->ee_id; 1811 endpoint->channel_id = data->channel_id; 1812 endpoint->endpoint_id = data->endpoint_id; 1813 endpoint->toward_ipa = data->toward_ipa; 1814 endpoint->data = &data->endpoint.config; 1815 1816 ipa->initialized |= BIT(endpoint->endpoint_id); 1817 } 1818 1819 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) 1820 { 1821 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); 1822 1823 memset(endpoint, 0, sizeof(*endpoint)); 1824 } 1825 1826 void ipa_endpoint_exit(struct ipa *ipa) 1827 { 1828 u32 initialized = ipa->initialized; 1829 1830 while (initialized) { 1831 u32 endpoint_id = __fls(initialized); 1832 1833 initialized ^= BIT(endpoint_id); 1834 1835 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); 1836 } 1837 memset(ipa->name_map, 0, sizeof(ipa->name_map)); 1838 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); 1839 } 1840 1841 /* Returns a bitmask of endpoints that support filtering, or 0 on error */ 1842 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, 1843 const struct ipa_gsi_endpoint_data *data) 1844 { 1845 enum ipa_endpoint_name name; 1846 u32 filter_map; 1847 1848 if (!ipa_endpoint_data_valid(ipa, count, data)) 1849 return 0; /* Error */ 1850 1851 ipa->initialized = 0; 1852 1853 filter_map = 0; 1854 for (name = 0; name < count; name++, data++) { 1855 if (ipa_gsi_endpoint_data_empty(data)) 1856 continue; /* Skip over empty slots */ 1857 1858 ipa_endpoint_init_one(ipa, name, data); 1859 1860 if (data->endpoint.filter_support) 1861 filter_map |= BIT(data->endpoint_id); 1862 } 1863 1864 if (!ipa_filter_map_valid(ipa, filter_map)) 1865 goto err_endpoint_exit; 1866 1867 return filter_map; /* Non-zero bitmask */ 1868 1869 err_endpoint_exit: 1870 ipa_endpoint_exit(ipa); 1871 1872 return 0; /* Error */ 1873 } 1874