// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>

#include "ipa.h"
#include "ipa_power.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_resource.h"
#include "ipa_cmd.h"
#include "ipa_reg.h"
#include "ipa_mem.h"
#include "ipa_table.h"
#include "ipa_smp2p.h"
#include "ipa_modem.h"
#include "ipa_uc.h"
#include "ipa_interrupt.h"
#include "gsi_trans.h"
#include "ipa_sysfs.h"

/**
 * DOC: The IP Accelerator
 *
 * This driver supports the Qualcomm IP Accelerator (IPA), which is a
 * networking component found in many Qualcomm SoCs.  The IPA is connected
 * to the application processor (AP), but is also connected (and partially
 * controlled by) other "execution environments" (EEs), such as a modem.
 *
 * The IPA is the conduit between the AP and the modem that carries network
 * traffic.  This driver presents a network interface representing the
 * connection of the modem to external (e.g. LTE) networks.
 *
 * The IPA provides protocol checksum calculation, offloading this work
 * from the AP.  The IPA offers additional functionality, including routing,
 * filtering, and NAT support, but that more advanced functionality is not
 * currently supported.  Despite that, some resources--including routing
 * tables and filter tables--are defined in this driver because they must
 * be initialized even when the advanced hardware features are not used.
 *
 * There are two distinct layers that implement the IPA hardware, and this
 * is reflected in the organization of the driver.  The generic software
 * interface (GSI) is an integral component of the IPA, providing a
 * well-defined communication layer between the AP subsystem and the IPA
 * core.  The GSI implements a set of "channels" used for communication
 * between the AP and the IPA.
 *
 * The IPA layer uses GSI channels to implement its "endpoints".  And while
 * a GSI channel carries data between the AP and the IPA, a pair of IPA
 * endpoints is used to carry traffic between two EEs.  Specifically, the main
 * modem network interface is implemented by two pairs of endpoints:  a TX
 * endpoint on the AP coupled with an RX endpoint on the modem; and another
 * RX endpoint on the AP receiving data from a TX endpoint on the modem.
 */

/* The name of the GSI firmware file relative to /lib/firmware */
#define IPA_FW_PATH_DEFAULT	"ipa_fws.mdt"
#define IPA_PAS_ID		15	/* Peripheral authentication service ID for IPA */

/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
#define DPL_TIMESTAMP_SHIFT	14	/* ~1.172 kHz, ~853 usec per tick */
#define TAG_TIMESTAMP_SHIFT	14
#define NAT_TIMESTAMP_SHIFT	24	/* ~1.144 Hz, ~874 msec per tick */

/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */
#define IPA_XO_CLOCK_DIVIDER	192	/* 1 is subtracted where used */

/**
 * ipa_setup() - Set up IPA hardware
 * @ipa:	IPA pointer
 *
 * Perform initialization that requires issuing immediate commands on
 * the command TX endpoint.  If the modem is doing GSI firmware load
 * and initialization, this function will be called when an SMP2P
 * interrupt has been signaled by the modem.  Otherwise it will be
 * called from ipa_probe() after GSI firmware has been successfully
 * loaded, authenticated, and started by Trust Zone.
 *
 * Return:	0 if successful, or a negative error code.  On failure,
 *		everything done here is unwound (the error labels below
 *		mirror the setup steps in reverse order).
 */
int ipa_setup(struct ipa *ipa)
{
	struct ipa_endpoint *exception_endpoint;
	struct ipa_endpoint *command_endpoint;
	struct device *dev = &ipa->pdev->dev;
	int ret;

	ret = gsi_setup(&ipa->gsi);
	if (ret)
		return ret;

	ret = ipa_power_setup(ipa);
	if (ret)
		goto err_gsi_teardown;

	ipa_endpoint_setup(ipa);

	/* We need to use the AP command TX endpoint to perform other
	 * initialization, so we enable first.
	 */
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	ret = ipa_endpoint_enable_one(command_endpoint);
	if (ret)
		goto err_endpoint_teardown;

	ret = ipa_mem_setup(ipa);	/* No matching teardown required */
	if (ret)
		goto err_command_disable;

	ret = ipa_table_setup(ipa);	/* No matching teardown required */
	if (ret)
		goto err_command_disable;

	/* Enable the exception handling endpoint, and tell the hardware
	 * to use it by default.
	 */
	exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
	ret = ipa_endpoint_enable_one(exception_endpoint);
	if (ret)
		goto err_command_disable;

	ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);

	/* We're all set.  Now prepare for communication with the modem */
	ret = ipa_qmi_setup(ipa);
	if (ret)
		goto err_default_route_clear;

	ipa->setup_complete = true;

	dev_info(dev, "IPA driver setup completed successfully\n");

	return 0;

err_default_route_clear:
	ipa_endpoint_default_route_clear(ipa);
	ipa_endpoint_disable_one(exception_endpoint);
err_command_disable:
	ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
	ipa_endpoint_teardown(ipa);
	ipa_power_teardown(ipa);
err_gsi_teardown:
	gsi_teardown(&ipa->gsi);

	return ret;
}

/**
 * ipa_teardown() - Inverse of ipa_setup()
 * @ipa:	IPA pointer
 *
 * Undoes each ipa_setup() step in reverse order.  Also clears
 * setup_complete first so nothing else treats the hardware as usable
 * while teardown is in progress.
 */
static void ipa_teardown(struct ipa *ipa)
{
	struct ipa_endpoint *exception_endpoint;
	struct ipa_endpoint *command_endpoint;

	/* We're going to tear everything down, as if setup never completed */
	ipa->setup_complete = false;

	ipa_qmi_teardown(ipa);
	ipa_endpoint_default_route_clear(ipa);
	exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
	ipa_endpoint_disable_one(exception_endpoint);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	ipa_endpoint_disable_one(command_endpoint);
	ipa_endpoint_teardown(ipa);
	ipa_power_teardown(ipa);
	gsi_teardown(&ipa->gsi);
}

/* Write the platform-specific backward-compatibility value, where present */
static void
ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
{
	u32 val;

	/* IPA v4.5+ has no backward compatibility register */
	if (ipa->version >= IPA_VERSION_4_5)
		return;

	val = data->backward_compat;
	iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
}

/* TX config tweak needed only for IPA versions in (v4.0, v4.5) */
static void ipa_hardware_config_tx(struct ipa *ipa)
{
	enum ipa_version version = ipa->version;
	u32 val;

	if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
		return;

	/* Disable PA mask to allow HOLB drop */
	val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);

	val &= ~PA_MASK_EN_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
}

/* Version-specific clock-gating workarounds via the CLKON register */
static void ipa_hardware_config_clkon(struct ipa *ipa)
{
	enum ipa_version version = ipa->version;
	u32 val;

	if (version < IPA_VERSION_3_1 || version >= IPA_VERSION_4_5)
		return;

	/* Implement some hardware workarounds */
	if (version >= IPA_VERSION_4_0) {
		/* Enable open global clocks in the CLKON configuration */
		val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
	} else if (version == IPA_VERSION_3_1) {
		val = MISC_FMASK;	/* Disable MISC clock gating */
	} else {
		return;
	}

	iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
}

/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
	u32 val;

	/* Nothing to configure prior to IPA v4.0 */
	if (ipa->version < IPA_VERSION_4_0)
		return;

	/* Read-modify-write: only the version-specific bits change */
	val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);

	if (ipa->version == IPA_VERSION_4_0) {
		val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
		val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
		val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
	} else if (ipa->version < IPA_VERSION_4_5) {
		val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
	} else {
		/* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */
	}

	val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
	val |= GSI_MULTI_INORDER_WR_DIS_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
}

/* Configure DDR and (possibly) PCIe max read/write QSB values */
static void
ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
	const struct ipa_qsb_data *data0;
	const struct ipa_qsb_data *data1;
	u32 val;

	/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
	data0 = &data->qsb_data[IPA_QSB_MASTER_DDR];
	if (data->qsb_count > 1)
		data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];

	/* Max outstanding write accesses for QSB masters */
	val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK);
	if (data->qsb_count > 1)
		val |= u32_encode_bits(data1->max_writes,
				       GEN_QMB_1_MAX_WRITES_FMASK);
	iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);

	/* Max outstanding read accesses for QSB masters */
	val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK);
	if (ipa->version >= IPA_VERSION_4_0)
		val |= u32_encode_bits(data0->max_reads_beats,
				       GEN_QMB_0_MAX_READS_BEATS_FMASK);
	if (data->qsb_count > 1) {
		val |= u32_encode_bits(data1->max_reads,
				       GEN_QMB_1_MAX_READS_FMASK);
		if (ipa->version >= IPA_VERSION_4_0)
			val |= u32_encode_bits(data1->max_reads_beats,
					       GEN_QMB_1_MAX_READS_BEATS_FMASK);
	}
	iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
}

/* The internal inactivity timer clock is used for the aggregation timer */
#define TIMER_FREQUENCY	32000		/* 32 KHz inactivity timer clock */

/* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY
 * field to represent the given number of microseconds.  The value is one
 * less than the number of timer ticks in the requested period.  0 is not
 * a valid granularity value (so for example @usec must be at least 16 for
 * a TIMER_FREQUENCY of 32000).
 */
static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
{
	return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}

/* IPA uses unified Qtime starting at IPA v4.5, implementing various
 * timestamps and timers independent of the IPA core clock rate.  The
 * Qtimer is based on a 56-bit timestamp incremented at each tick of
 * a 19.2 MHz SoC crystal oscillator (XO clock).
 *
 * For IPA timestamps (tag, NAT, data path logging) a lower resolution
 * timestamp is achieved by shifting the Qtimer timestamp value right
 * some number of bits to produce the low-order bits of the coarser
 * granularity timestamp.
 *
 * For timers, a common timer clock is derived from the XO clock using
 * a divider (we use 192, to produce a 100kHz timer clock).  From
 * this common clock, three "pulse generators" are used to produce
 * timer ticks at a configurable frequency.  IPA timers (such as
 * those used for aggregation or head-of-line block handling) now
 * define their period based on one of these pulse generators.
 */
static void ipa_qtime_config(struct ipa *ipa)
{
	u32 val;

	/* Timer clock divider must be disabled when we change the rate */
	iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);

	/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
	val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
	val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK);
	/* Configure tag and NAT Qtime timestamp resolution as well */
	val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
	val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
	iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET);

	/* Set granularity of pulse generators used for other timers */
	val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
	val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
	val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
	iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET);

	/* Actual divider is 1 more than value supplied here */
	val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
	iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);

	/* Divider value is set; re-enable the common timer clock divider.
	 * Note that val still holds the divider value written above, so
	 * this write sets the enable bit while preserving the divider.
	 */
	val |= u32_encode_bits(1, DIV_ENABLE_FMASK);
	iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
}

/* Before IPA v4.5 timing is controlled by a counter register */
static void ipa_hardware_config_counter(struct ipa *ipa)
{
	u32 granularity;
	u32 val;

	granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);

	val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);

	iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
}

/* Select the timing mechanism appropriate for the IPA version */
static void ipa_hardware_config_timing(struct ipa *ipa)
{
	if (ipa->version < IPA_VERSION_4_5)
		ipa_hardware_config_counter(ipa);
	else
		ipa_qtime_config(ipa);
}

/* Disable hashed filter/route tables where the hardware can't support them */
static void ipa_hardware_config_hashing(struct ipa *ipa)
{
	u32 offset;

	if (ipa->version != IPA_VERSION_4_2)
		return;

	/* IPA v4.2 does not support hashed tables, so disable them */
	offset = ipa_reg_filt_rout_hash_en_offset(IPA_VERSION_4_2);
	iowrite32(0, ipa->reg_virt + offset);
}

/* Program the idle-indication register used for dynamic clock division.
 * @enter_idle_debounce_thresh:	debounce threshold before signaling idle
 * @const_non_idle_enable:	if true, report constantly non-idle (the
 *				power-on reset behavior)
 */
static void ipa_idle_indication_cfg(struct ipa *ipa,
				    u32 enter_idle_debounce_thresh,
				    bool const_non_idle_enable)
{
	u32 offset;
	u32 val;

	val = u32_encode_bits(enter_idle_debounce_thresh,
			      ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
	if (const_non_idle_enable)
		val |= CONST_NON_IDLE_ENABLE_FMASK;

	offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
	iowrite32(val, ipa->reg_virt + offset);
}

/**
 * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
 * @ipa:	IPA pointer
 *
 * Configures when the IPA signals it is idle to the global clock
 * controller, which can respond by scaling down the clock to save
 * power.
 */
static void ipa_hardware_dcd_config(struct ipa *ipa)
{
	/* Recommended values for IPA 3.5 and later according to IPA HPG */
	ipa_idle_indication_cfg(ipa, 256, false);
}

/* Inverse of ipa_hardware_dcd_config() */
static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
{
	/* Power-on reset values */
	ipa_idle_indication_cfg(ipa, 0, true);
}

/**
 * ipa_hardware_config() - Primitive hardware initialization
 * @ipa:	IPA pointer
 * @data:	IPA configuration data
 */
static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
{
	ipa_hardware_config_bcr(ipa, data);
	ipa_hardware_config_tx(ipa);
	ipa_hardware_config_clkon(ipa);
	ipa_hardware_config_comp(ipa);
	ipa_hardware_config_qsb(ipa, data);
	ipa_hardware_config_timing(ipa);
	ipa_hardware_config_hashing(ipa);
	ipa_hardware_dcd_config(ipa);
}

/**
 * ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
 * @ipa:	IPA pointer
 *
 * This restores the power-on reset values (even if they aren't different)
 */
static void ipa_hardware_deconfig(struct ipa *ipa)
{
	/* Mostly we just leave things as we set them. */
	ipa_hardware_dcd_deconfig(ipa);
}

/**
 * ipa_config() - Configure IPA hardware
 * @ipa:	IPA pointer
 * @data:	IPA configuration data
 *
 * Perform initialization requiring IPA power to be enabled.
 *
 * Return:	0 if successful, or a negative error code.  On failure,
 *		the error labels unwind the completed steps in reverse.
 */
static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
{
	int ret;

	ipa_hardware_config(ipa, data);

	ret = ipa_mem_config(ipa);
	if (ret)
		goto err_hardware_deconfig;

	ipa->interrupt = ipa_interrupt_config(ipa);
	if (IS_ERR(ipa->interrupt)) {
		ret = PTR_ERR(ipa->interrupt);
		ipa->interrupt = NULL;
		goto err_mem_deconfig;
	}

	ipa_uc_config(ipa);

	ret = ipa_endpoint_config(ipa);
	if (ret)
		goto err_uc_deconfig;

	ipa_table_config(ipa);		/* No deconfig required */

	/* Assign resource limitation to each group; no deconfig required */
	ret = ipa_resource_config(ipa, data->resource_data);
	if (ret)
		goto err_endpoint_deconfig;

	ret = ipa_modem_config(ipa);
	if (ret)
		goto err_endpoint_deconfig;

	return 0;

err_endpoint_deconfig:
	ipa_endpoint_deconfig(ipa);
err_uc_deconfig:
	ipa_uc_deconfig(ipa);
	ipa_interrupt_deconfig(ipa->interrupt);
	ipa->interrupt = NULL;
err_mem_deconfig:
	ipa_mem_deconfig(ipa);
err_hardware_deconfig:
	ipa_hardware_deconfig(ipa);

	return ret;
}

/**
 * ipa_deconfig() - Inverse of ipa_config()
 * @ipa:	IPA pointer
 */
static void ipa_deconfig(struct ipa *ipa)
{
	ipa_modem_deconfig(ipa);
	ipa_endpoint_deconfig(ipa);
	ipa_uc_deconfig(ipa);
	ipa_interrupt_deconfig(ipa->interrupt);
	ipa->interrupt = NULL;
	ipa_mem_deconfig(ipa);
	ipa_hardware_deconfig(ipa);
}

/* Load GSI firmware into the reserved DT memory region and have Trust
 * Zone authenticate and start it.  Used only when the AP (not the modem)
 * performs GSI initialization.
 */
static int ipa_firmware_load(struct device *dev)
{
	const struct firmware *fw;
	struct device_node *node;
	struct resource res;
	phys_addr_t phys;
	const char *path;
	ssize_t size;
	void *virt;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node) {
		dev_err(dev, "DT error getting \"memory-region\" property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(node, 0, &res);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "error %d getting \"memory-region\" resource\n",
			ret);
		return ret;
	}

	/* Use name from DTB if specified; use default for *any* error */
	ret = of_property_read_string(dev->of_node, "firmware-name", &path);
	if (ret) {
		dev_dbg(dev, "error %d getting \"firmware-name\" resource\n",
			ret);
		path = IPA_FW_PATH_DEFAULT;
	}

	ret = request_firmware(&fw, path, dev);
	if (ret) {
		dev_err(dev, "error %d requesting \"%s\"\n", ret, path);
		return ret;
	}

	phys = res.start;
	size = (size_t)resource_size(&res);
	virt = memremap(phys, size, MEMREMAP_WC);
	if (!virt) {
		dev_err(dev, "unable to remap firmware memory\n");
		ret = -ENOMEM;
		goto out_release_firmware;
	}

	/* Load the image, then (if that worked) authenticate and start it */
	ret = qcom_mdt_load(dev, fw, path, IPA_PAS_ID, virt, phys, size, NULL);
	if (ret)
		dev_err(dev, "error %d loading \"%s\"\n", ret, path);
	else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
		dev_err(dev, "error %d authenticating \"%s\"\n", ret, path);

	memunmap(virt);
out_release_firmware:
	release_firmware(fw);

	return ret;
}

static const struct of_device_id ipa_match[] = {
	{
		.compatible	= "qcom,msm8998-ipa",
		.data		= &ipa_data_v3_1,
	},
	{
		.compatible	= "qcom,sdm845-ipa",
		.data		= &ipa_data_v3_5_1,
	},
	{
		.compatible	= "qcom,sc7180-ipa",
		.data		= &ipa_data_v4_2,
	},
	{
		.compatible	= "qcom,sdx55-ipa",
		.data		= &ipa_data_v4_5,
	},
	{
		.compatible	= "qcom,sm8350-ipa",
		.data		= &ipa_data_v4_9,
	},
	{
		.compatible	= "qcom,sc7280-ipa",
		.data		= &ipa_data_v4_11,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, ipa_match);

/* Check things that can be validated at build time.  This just
 * groups these things so BUILD_BUG_ON() calls don't clutter the rest
 * of the code.
 */
static void ipa_validate_build(void)
{
	/* At one time we assumed a 64-bit build, allowing some do_div()
	 * calls to be replaced by simple division or modulo operations.
	 * We currently only perform divide and modulo operations on u32,
	 * u16, or size_t objects, and of those only size_t has any chance
	 * of being a 64-bit value.  (It should be guaranteed 32 bits wide
	 * on a 32-bit build, but there is no harm in verifying that.)
	 */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4);

	/* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
	BUILD_BUG_ON(GSI_EE_AP != 0);

	/* There's no point if we have no channels or event rings */
	BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
	BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);

	/* GSI hardware design limits */
	BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
	BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);

	/* The number of TREs in a transaction is limited by the channel's
	 * TLV FIFO size.  A transaction structure uses 8-bit fields
	 * to represent the number of TREs it has allocated and used.
	 */
	BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);

	/* This is used as a divisor */
	BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);

	/* Aggregation granularity value can't be 0, and must fit */
	BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
	BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
		     field_max(AGGR_GRANULARITY_FMASK));
}

/**
 * ipa_probe() - IPA platform driver probe function
 * @pdev:	Platform device pointer
 *
 * Return:	0 if successful, or a negative error code (possibly
 *		EPROBE_DEFER)
 *
 * This is the main entry point for the IPA driver.  Initialization proceeds
 * in several stages:
 *   - The "init" stage involves activities that can be initialized without
 *     access to the IPA hardware.
 *   - The "config" stage requires IPA power to be active so IPA registers
 *     can be accessed, but does not require the use of IPA immediate
 *     commands.
 *   - The "setup" stage uses IPA immediate commands, and so requires the
 *     GSI layer to be initialized.
 *
 * A Boolean Device Tree "modem-init" property determines whether GSI
 * initialization will be performed by the AP (Trust Zone) or the modem.
 * If the AP does GSI initialization, the setup phase is entered after
 * this has completed successfully.  Otherwise the modem initializes
 * the GSI layer and signals it has finished by sending an SMP2P interrupt
 * to the AP; this triggers the start of IPA setup.
 */
static int ipa_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct ipa_data *data;
	struct ipa_power *power;
	bool modem_init;
	struct ipa *ipa;
	int ret;

	ipa_validate_build();

	/* Get configuration data early; needed for power initialization */
	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "matched hardware not supported\n");
		return -ENODEV;
	}

	if (!ipa_version_supported(data->version)) {
		dev_err(dev, "unsupported IPA version %u\n", data->version);
		return -EINVAL;
	}

	/* If we need Trust Zone, make sure it's available */
	modem_init = of_property_read_bool(dev->of_node, "modem-init");
	if (!modem_init)
		if (!qcom_scm_is_available())
			return -EPROBE_DEFER;

	/* The clock and interconnects might not be ready when we're
	 * probed, so might return -EPROBE_DEFER.
	 */
	power = ipa_power_init(dev, data->power_data);
	if (IS_ERR(power))
		return PTR_ERR(power);

	/* No more EPROBE_DEFER.  Allocate and initialize the IPA structure */
	ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
	if (!ipa) {
		ret = -ENOMEM;
		goto err_power_exit;
	}

	ipa->pdev = pdev;
	dev_set_drvdata(dev, ipa);
	ipa->power = power;
	ipa->version = data->version;
	init_completion(&ipa->completion);

	ret = ipa_reg_init(ipa);
	if (ret)
		goto err_kfree_ipa;

	ret = ipa_mem_init(ipa, data->mem_data);
	if (ret)
		goto err_reg_exit;

	ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
		       data->endpoint_data);
	if (ret)
		goto err_mem_exit;

	/* Result is a non-zero mask of endpoints that support filtering */
	ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
					    data->endpoint_data);
	if (!ipa->filter_map) {
		ret = -EINVAL;
		goto err_gsi_exit;
	}

	ret = ipa_table_init(ipa);
	if (ret)
		goto err_endpoint_exit;

	ret = ipa_smp2p_init(ipa, modem_init);
	if (ret)
		goto err_table_exit;

	/* Power needs to be active for config and setup */
	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0))
		goto err_power_put;

	ret = ipa_config(ipa, data);
	if (ret)
		goto err_power_put;

	dev_info(dev, "IPA driver initialized");

	/* If the modem is doing early initialization, it will trigger a
	 * call to ipa_setup() when it has finished.  In that case we're
	 * done here.
	 */
	if (modem_init)
		goto done;

	/* Otherwise we need to load the firmware and have Trust Zone
	 * validate and install it.  If that succeeds we can proceed
	 * with setup.
	 */
	ret = ipa_firmware_load(dev);
	if (ret)
		goto err_deconfig;

	ret = ipa_setup(ipa);
	if (ret)
		goto err_deconfig;
done:
	pm_runtime_mark_last_busy(dev);
	(void)pm_runtime_put_autosuspend(dev);

	return 0;

err_deconfig:
	ipa_deconfig(ipa);
err_power_put:
	pm_runtime_put_noidle(dev);
	ipa_smp2p_exit(ipa);
err_table_exit:
	ipa_table_exit(ipa);
err_endpoint_exit:
	ipa_endpoint_exit(ipa);
err_gsi_exit:
	gsi_exit(&ipa->gsi);
err_mem_exit:
	ipa_mem_exit(ipa);
err_reg_exit:
	ipa_reg_exit(ipa);
err_kfree_ipa:
	kfree(ipa);
err_power_exit:
	ipa_power_exit(power);

	return ret;
}

static int ipa_remove(struct platform_device *pdev)
{
	struct ipa *ipa = dev_get_drvdata(&pdev->dev);
	struct ipa_power *power = ipa->power;
	struct device *dev = &pdev->dev;
	int ret;

	/* Prevent the modem from triggering a call to ipa_setup().  This
	 * also ensures a modem-initiated setup that's underway completes.
	 */
	ipa_smp2p_irq_disable_setup(ipa);

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0))
		goto out_power_put;

	if (ipa->setup_complete) {
		ret = ipa_modem_stop(ipa);
		/* If starting or stopping is in progress, try once more */
		if (ret == -EBUSY) {
			usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
			ret = ipa_modem_stop(ipa);
		}
		if (ret)
			/* NOTE(review): returning here leaves the runtime PM
			 * reference taken above held, and skips the exit
			 * chain below (smp2p/table/endpoint/gsi/mem/reg and
			 * the kfree) -- confirm this is the intended
			 * "removal failed, keep state" behavior.
			 */
			return ret;

		ipa_teardown(ipa);
	}

	ipa_deconfig(ipa);
out_power_put:
	pm_runtime_put_noidle(dev);
	ipa_smp2p_exit(ipa);
	ipa_table_exit(ipa);
	ipa_endpoint_exit(ipa);
	gsi_exit(&ipa->gsi);
	ipa_mem_exit(ipa);
	ipa_reg_exit(ipa);
	kfree(ipa);
	ipa_power_exit(power);

	dev_info(dev, "IPA driver removed");

	return 0;
}

/* Shutdown just delegates to remove, logging any failure */
static void ipa_shutdown(struct platform_device *pdev)
{
	int ret;

	ret = ipa_remove(pdev);
	if (ret)
		dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret);
}

static const struct attribute_group *ipa_attribute_groups[] = {
	&ipa_attribute_group,
	&ipa_feature_attribute_group,
	&ipa_endpoint_id_attribute_group,
	&ipa_modem_attribute_group,
	NULL,
};

static struct platform_driver ipa_driver = {
	.probe		= ipa_probe,
	.remove		= ipa_remove,
	.shutdown	= ipa_shutdown,
	.driver	= {
		.name		= "ipa",
		.pm		= &ipa_pm_ops,
		.of_match_table	= ipa_match,
		.dev_groups	= ipa_attribute_groups,
	},
};

module_platform_driver(ipa_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");