1 // SPDX-License-Identifier: GPL-2.0 2 3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 4 * Copyright (C) 2018-2020 Linaro Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <linux/atomic.h> 9 #include <linux/bitfield.h> 10 #include <linux/device.h> 11 #include <linux/bug.h> 12 #include <linux/io.h> 13 #include <linux/firmware.h> 14 #include <linux/module.h> 15 #include <linux/of.h> 16 #include <linux/of_device.h> 17 #include <linux/of_address.h> 18 #include <linux/qcom_scm.h> 19 #include <linux/soc/qcom/mdt_loader.h> 20 21 #include "ipa.h" 22 #include "ipa_clock.h" 23 #include "ipa_data.h" 24 #include "ipa_endpoint.h" 25 #include "ipa_cmd.h" 26 #include "ipa_reg.h" 27 #include "ipa_mem.h" 28 #include "ipa_table.h" 29 #include "ipa_modem.h" 30 #include "ipa_uc.h" 31 #include "ipa_interrupt.h" 32 #include "gsi_trans.h" 33 34 /** 35 * DOC: The IP Accelerator 36 * 37 * This driver supports the Qualcomm IP Accelerator (IPA), which is a 38 * networking component found in many Qualcomm SoCs. The IPA is connected 39 * to the application processor (AP), but is also connected (and partially 40 * controlled by) other "execution environments" (EEs), such as a modem. 41 * 42 * The IPA is the conduit between the AP and the modem that carries network 43 * traffic. This driver presents a network interface representing the 44 * connection of the modem to external (e.g. LTE) networks. 45 * 46 * The IPA provides protocol checksum calculation, offloading this work 47 * from the AP. The IPA offers additional functionality, including routing, 48 * filtering, and NAT support, but that more advanced functionality is not 49 * currently supported. Despite that, some resources--including routing 50 * tables and filter tables--are defined in this driver because they must 51 * be initialized even when the advanced hardware features are not used. 52 * 53 * There are two distinct layers that implement the IPA hardware, and this 54 * is reflected in the organization of the driver. The generic software 55 * interface (GSI) is an integral component of the IPA, providing a 56 * well-defined communication layer between the AP subsystem and the IPA 57 * core. The GSI implements a set of "channels" used for communication 58 * between the AP and the IPA. 59 * 60 * The IPA layer uses GSI channels to implement its "endpoints". And while 61 * a GSI channel carries data between the AP and the IPA, a pair of IPA 62 * endpoints is used to carry traffic between two EEs. Specifically, the main 63 * modem network interface is implemented by two pairs of endpoints: a TX 64 * endpoint on the AP coupled with an RX endpoint on the modem; and another 65 * RX endpoint on the AP receiving data from a TX endpoint on the modem. 66 */ 67 68 /* The name of the GSI firmware file relative to /lib/firmware */ 69 #define IPA_FWS_PATH "ipa_fws.mdt" 70 #define IPA_PAS_ID 15 71 72 /* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */ 73 #define DPL_TIMESTAMP_SHIFT 14 /* ~1.172 kHz, ~853 usec per tick */ 74 #define TAG_TIMESTAMP_SHIFT 14 75 #define NAT_TIMESTAMP_SHIFT 24 /* ~1.144 Hz, ~874 msec per tick */ 76 77 /* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */ 78 #define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */ 79 80 /** 81 * ipa_suspend_handler() - Handle the suspend IPA interrupt 82 * @ipa: IPA pointer 83 * @irq_id: IPA interrupt type (unused) 84 * 85 * If an RX endpoint is in suspend state, and the IPA has a packet 86 * destined for that endpoint, the IPA generates a SUSPEND interrupt 87 * to inform the AP that it should resume the endpoint. If we get 88 * one of these interrupts we just resume everything. 89 */ 90 static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) 91 { 92 /* Just report the event, and let system resume handle the rest. 93 * More than one endpoint could signal this; if so, ignore 94 * all but the first. 95 */ 96 if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags)) 97 pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); 98 99 /* Acknowledge/clear the suspend interrupt on all endpoints */ 100 ipa_interrupt_suspend_clear_all(ipa->interrupt); 101 } 102 103 /** 104 * ipa_setup() - Set up IPA hardware 105 * @ipa: IPA pointer 106 * 107 * Perform initialization that requires issuing immediate commands on 108 * the command TX endpoint. If the modem is doing GSI firmware load 109 * and initialization, this function will be called when an SMP2P 110 * interrupt has been signaled by the modem. Otherwise it will be 111 * called from ipa_probe() after GSI firmware has been successfully 112 * loaded, authenticated, and started by Trust Zone. 113 */ 114 int ipa_setup(struct ipa *ipa) 115 { 116 struct ipa_endpoint *exception_endpoint; 117 struct ipa_endpoint *command_endpoint; 118 struct device *dev = &ipa->pdev->dev; 119 int ret; 120 121 ret = gsi_setup(&ipa->gsi); 122 if (ret) 123 return ret; 124 125 ipa->interrupt = ipa_interrupt_setup(ipa); 126 if (IS_ERR(ipa->interrupt)) { 127 ret = PTR_ERR(ipa->interrupt); 128 goto err_gsi_teardown; 129 } 130 ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND, 131 ipa_suspend_handler); 132 133 ipa_uc_setup(ipa); 134 135 ret = device_init_wakeup(dev, true); 136 if (ret) 137 goto err_uc_teardown; 138 139 ipa_endpoint_setup(ipa); 140 141 /* We need to use the AP command TX endpoint to perform other 142 * initialization, so we enable first. 143 */ 144 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; 145 ret = ipa_endpoint_enable_one(command_endpoint); 146 if (ret) 147 goto err_endpoint_teardown; 148 149 ret = ipa_mem_setup(ipa); 150 if (ret) 151 goto err_command_disable; 152 153 ret = ipa_table_setup(ipa); 154 if (ret) 155 goto err_mem_teardown; 156 157 /* Enable the exception handling endpoint, and tell the hardware 158 * to use it by default. 159 */ 160 exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; 161 ret = ipa_endpoint_enable_one(exception_endpoint); 162 if (ret) 163 goto err_table_teardown; 164 165 ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id); 166 167 /* We're all set. Now prepare for communication with the modem */ 168 ret = ipa_modem_setup(ipa); 169 if (ret) 170 goto err_default_route_clear; 171 172 ipa->setup_complete = true; 173 174 dev_info(dev, "IPA driver setup completed successfully\n"); 175 176 return 0; 177 178 err_default_route_clear: 179 ipa_endpoint_default_route_clear(ipa); 180 ipa_endpoint_disable_one(exception_endpoint); 181 err_table_teardown: 182 ipa_table_teardown(ipa); 183 err_mem_teardown: 184 ipa_mem_teardown(ipa); 185 err_command_disable: 186 ipa_endpoint_disable_one(command_endpoint); 187 err_endpoint_teardown: 188 ipa_endpoint_teardown(ipa); 189 (void)device_init_wakeup(dev, false); 190 err_uc_teardown: 191 ipa_uc_teardown(ipa); 192 ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); 193 ipa_interrupt_teardown(ipa->interrupt); 194 err_gsi_teardown: 195 gsi_teardown(&ipa->gsi); 196 197 return ret; 198 } 199 200 /** 201 * ipa_teardown() - Inverse of ipa_setup() 202 * @ipa: IPA pointer 203 */ 204 static void ipa_teardown(struct ipa *ipa) 205 { 206 struct ipa_endpoint *exception_endpoint; 207 struct ipa_endpoint *command_endpoint; 208 209 ipa_modem_teardown(ipa); 210 ipa_endpoint_default_route_clear(ipa); 211 exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; 212 ipa_endpoint_disable_one(exception_endpoint); 213 ipa_table_teardown(ipa); 214 ipa_mem_teardown(ipa); 215 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; 216 ipa_endpoint_disable_one(command_endpoint); 217 ipa_endpoint_teardown(ipa); 218 (void)device_init_wakeup(&ipa->pdev->dev, false); 219 ipa_uc_teardown(ipa); 220 ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); 221 ipa_interrupt_teardown(ipa->interrupt); 222 gsi_teardown(&ipa->gsi); 223 } 224 225 /* Configure QMB Core Master Port selection */ 226 static void ipa_hardware_config_comp(struct ipa *ipa) 227 { 228 u32 val; 229 230 /* Nothing to configure for IPA v3.5.1 */ 231 if (ipa->version == IPA_VERSION_3_5_1) 232 return; 233 234 val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET); 235 236 if (ipa->version == IPA_VERSION_4_0) { 237 val &= ~IPA_QMB_SELECT_CONS_EN_FMASK; 238 val &= ~IPA_QMB_SELECT_PROD_EN_FMASK; 239 val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK; 240 } else if (ipa->version < IPA_VERSION_4_5) { 241 val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK; 242 } else { 243 /* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */ 244 } 245 246 val |= GSI_MULTI_INORDER_RD_DIS_FMASK; 247 val |= GSI_MULTI_INORDER_WR_DIS_FMASK; 248 249 iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET); 250 } 251 252 /* Configure DDR and (possibly) PCIe max read/write QSB values */ 253 static void 254 ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data) 255 { 256 const struct ipa_qsb_data *data0; 257 const struct ipa_qsb_data *data1; 258 u32 val; 259 260 /* assert(data->qsb_count > 0); */ 261 /* assert(data->qsb_count < 3); */ 262 263 /* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */ 264 data0 = &data->qsb_data[IPA_QSB_MASTER_DDR]; 265 if (data->qsb_count > 1) 266 data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE]; 267 268 /* Max outstanding write accesses for QSB masters */ 269 val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK); 270 if (data->qsb_count > 1) 271 val |= u32_encode_bits(data1->max_writes, 272 GEN_QMB_1_MAX_WRITES_FMASK); 273 iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET); 274 275 /* Max outstanding read accesses for QSB masters */ 276 val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK); 277 if (ipa->version >= IPA_VERSION_4_0) 278 val |= u32_encode_bits(data0->max_reads_beats, 279 GEN_QMB_0_MAX_READS_BEATS_FMASK); 280 if (data->qsb_count > 1) { 281 val |= u32_encode_bits(data1->max_reads, 282 GEN_QMB_1_MAX_READS_FMASK); 283 if (ipa->version >= IPA_VERSION_4_0) 284 val |= u32_encode_bits(data1->max_reads_beats, 285 GEN_QMB_1_MAX_READS_BEATS_FMASK); 286 } 287 iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET); 288 } 289 290 /* IPA uses unified Qtime starting at IPA v4.5, implementing various 291 * timestamps and timers independent of the IPA core clock rate. The 292 * Qtimer is based on a 56-bit timestamp incremented at each tick of 293 * a 19.2 MHz SoC crystal oscillator (XO clock). 294 * 295 * For IPA timestamps (tag, NAT, data path logging) a lower resolution 296 * timestamp is achieved by shifting the Qtimer timestamp value right 297 * some number of bits to produce the low-order bits of the coarser 298 * granularity timestamp. 299 * 300 * For timers, a common timer clock is derived from the XO clock using 301 * a divider (we use 192, to produce a 100kHz timer clock). From 302 * this common clock, three "pulse generators" are used to produce 303 * timer ticks at a configurable frequency. IPA timers (such as 304 * those used for aggregation or head-of-line block handling) now 305 * define their period based on one of these pulse generators. 306 */ 307 static void ipa_qtime_config(struct ipa *ipa) 308 { 309 u32 val; 310 311 /* Timer clock divider must be disabled when we change the rate */ 312 iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET); 313 314 /* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */ 315 val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK); 316 val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK); 317 /* Configure tag and NAT Qtime timestamp resolution as well */ 318 val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK); 319 val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK); 320 iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET); 321 322 /* Set granularity of pulse generators used for other timers */ 323 val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK); 324 val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK); 325 val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK); 326 iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET); 327 328 /* Actual divider is 1 more than value supplied here */ 329 val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK); 330 iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET); 331 332 /* Divider value is set; re-enable the common timer clock divider */ 333 val |= u32_encode_bits(1, DIV_ENABLE_FMASK); 334 iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET); 335 } 336 337 static void ipa_idle_indication_cfg(struct ipa *ipa, 338 u32 enter_idle_debounce_thresh, 339 bool const_non_idle_enable) 340 { 341 u32 offset; 342 u32 val; 343 344 val = u32_encode_bits(enter_idle_debounce_thresh, 345 ENTER_IDLE_DEBOUNCE_THRESH_FMASK); 346 if (const_non_idle_enable) 347 val |= CONST_NON_IDLE_ENABLE_FMASK; 348 349 offset = ipa_reg_idle_indication_cfg_offset(ipa->version); 350 iowrite32(val, ipa->reg_virt + offset); 351 } 352 353 /** 354 * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA 355 * @ipa: IPA pointer 356 * 357 * Configures when the IPA signals it is idle to the global clock 358 * controller, which can respond by scalling down the clock to 359 * save power. 360 */ 361 static void ipa_hardware_dcd_config(struct ipa *ipa) 362 { 363 /* Recommended values for IPA 3.5 and later according to IPA HPG */ 364 ipa_idle_indication_cfg(ipa, 256, false); 365 } 366 367 static void ipa_hardware_dcd_deconfig(struct ipa *ipa) 368 { 369 /* Power-on reset values */ 370 ipa_idle_indication_cfg(ipa, 0, true); 371 } 372 373 /** 374 * ipa_hardware_config() - Primitive hardware initialization 375 * @ipa: IPA pointer 376 * @data: IPA configuration data 377 */ 378 static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data) 379 { 380 enum ipa_version version = ipa->version; 381 u32 granularity; 382 u32 val; 383 384 /* IPA v4.5 has no backward compatibility register */ 385 if (version < IPA_VERSION_4_5) { 386 val = ipa_reg_bcr_val(version); 387 iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET); 388 } 389 390 /* Implement some hardware workarounds */ 391 if (version != IPA_VERSION_3_5_1 && version < IPA_VERSION_4_5) { 392 /* Enable open global clocks (not needed for IPA v4.5) */ 393 val = GLOBAL_FMASK; 394 val |= GLOBAL_2X_CLK_FMASK; 395 iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET); 396 397 /* Disable PA mask to allow HOLB drop */ 398 val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET); 399 val &= ~PA_MASK_EN_FMASK; 400 iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET); 401 } 402 403 ipa_hardware_config_comp(ipa); 404 405 /* Configure system bus limits */ 406 ipa_hardware_config_qsb(ipa, data); 407 408 if (version < IPA_VERSION_4_5) { 409 /* Configure aggregation timer granularity */ 410 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY); 411 val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK); 412 iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET); 413 } else { 414 ipa_qtime_config(ipa); 415 } 416 417 /* IPA v4.2 does not support hashed tables, so disable them */ 418 if (version == IPA_VERSION_4_2) { 419 u32 offset = ipa_reg_filt_rout_hash_en_offset(version); 420 421 iowrite32(0, ipa->reg_virt + offset); 422 } 423 424 /* Enable dynamic clock division */ 425 ipa_hardware_dcd_config(ipa); 426 } 427 428 /** 429 * ipa_hardware_deconfig() - Inverse of ipa_hardware_config() 430 * @ipa: IPA pointer 431 * 432 * This restores the power-on reset values (even if they aren't different) 433 */ 434 static void ipa_hardware_deconfig(struct ipa *ipa) 435 { 436 /* Mostly we just leave things as we set them. */ 437 ipa_hardware_dcd_deconfig(ipa); 438 } 439 440 #ifdef IPA_VALIDATION 441 442 static bool ipa_resource_limits_valid(struct ipa *ipa, 443 const struct ipa_resource_data *data) 444 { 445 u32 group_count; 446 u32 i; 447 u32 j; 448 449 /* We program at most 6 source or destination resource group limits */ 450 BUILD_BUG_ON(IPA_RESOURCE_GROUP_SRC_MAX > 6); 451 452 group_count = ipa_resource_group_src_count(ipa->version); 453 if (!group_count || group_count > IPA_RESOURCE_GROUP_SRC_MAX) 454 return false; 455 456 /* Return an error if a non-zero resource limit is specified 457 * for a resource group not supported by hardware. 458 */ 459 for (i = 0; i < data->resource_src_count; i++) { 460 const struct ipa_resource_src *resource; 461 462 resource = &data->resource_src[i]; 463 for (j = group_count; j < IPA_RESOURCE_GROUP_SRC_MAX; j++) 464 if (resource->limits[j].min || resource->limits[j].max) 465 return false; 466 } 467 468 group_count = ipa_resource_group_dst_count(ipa->version); 469 if (!group_count || group_count > IPA_RESOURCE_GROUP_DST_MAX) 470 return false; 471 472 for (i = 0; i < data->resource_dst_count; i++) { 473 const struct ipa_resource_dst *resource; 474 475 resource = &data->resource_dst[i]; 476 for (j = group_count; j < IPA_RESOURCE_GROUP_DST_MAX; j++) 477 if (resource->limits[j].min || resource->limits[j].max) 478 return false; 479 } 480 481 return true; 482 } 483 484 #else /* !IPA_VALIDATION */ 485 486 static bool ipa_resource_limits_valid(struct ipa *ipa, 487 const struct ipa_resource_data *data) 488 { 489 return true; 490 } 491 492 #endif /* !IPA_VALIDATION */ 493 494 static void 495 ipa_resource_config_common(struct ipa *ipa, u32 offset, 496 const struct ipa_resource_limits *xlimits, 497 const struct ipa_resource_limits *ylimits) 498 { 499 u32 val; 500 501 val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK); 502 val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK); 503 if (ylimits) { 504 val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK); 505 val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK); 506 } 507 508 iowrite32(val, ipa->reg_virt + offset); 509 } 510 511 static void ipa_resource_config_src(struct ipa *ipa, 512 const struct ipa_resource_src *resource) 513 { 514 u32 group_count = ipa_resource_group_src_count(ipa->version); 515 const struct ipa_resource_limits *ylimits; 516 u32 offset; 517 518 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type); 519 ylimits = group_count == 1 ? NULL : &resource->limits[1]; 520 ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits); 521 522 if (group_count < 2) 523 return; 524 525 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type); 526 ylimits = group_count == 3 ? NULL : &resource->limits[3]; 527 ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits); 528 529 if (group_count < 4) 530 return; 531 532 offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type); 533 ylimits = group_count == 5 ? NULL : &resource->limits[5]; 534 ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits); 535 } 536 537 static void ipa_resource_config_dst(struct ipa *ipa, 538 const struct ipa_resource_dst *resource) 539 { 540 u32 group_count = ipa_resource_group_dst_count(ipa->version); 541 const struct ipa_resource_limits *ylimits; 542 u32 offset; 543 544 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type); 545 ylimits = group_count == 1 ? NULL : &resource->limits[1]; 546 ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits); 547 548 if (group_count < 2) 549 return; 550 551 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type); 552 ylimits = group_count == 3 ? NULL : &resource->limits[3]; 553 ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits); 554 555 if (group_count < 4) 556 return; 557 558 offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type); 559 ylimits = group_count == 5 ? NULL : &resource->limits[5]; 560 ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits); 561 } 562 563 static int 564 ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data) 565 { 566 u32 i; 567 568 if (!ipa_resource_limits_valid(ipa, data)) 569 return -EINVAL; 570 571 for (i = 0; i < data->resource_src_count; i++) 572 ipa_resource_config_src(ipa, &data->resource_src[i]); 573 574 for (i = 0; i < data->resource_dst_count; i++) 575 ipa_resource_config_dst(ipa, &data->resource_dst[i]); 576 577 return 0; 578 } 579 580 static void ipa_resource_deconfig(struct ipa *ipa) 581 { 582 /* Nothing to do */ 583 } 584 585 /** 586 * ipa_config() - Configure IPA hardware 587 * @ipa: IPA pointer 588 * @data: IPA configuration data 589 * 590 * Perform initialization requiring IPA clock to be enabled. 591 */ 592 static int ipa_config(struct ipa *ipa, const struct ipa_data *data) 593 { 594 int ret; 595 596 /* Get a clock reference to allow initialization. This reference 597 * is held after initialization completes, and won't get dropped 598 * unless/until a system suspend request arrives. 599 */ 600 ipa_clock_get(ipa); 601 602 ipa_hardware_config(ipa, data); 603 604 ret = ipa_endpoint_config(ipa); 605 if (ret) 606 goto err_hardware_deconfig; 607 608 ret = ipa_mem_config(ipa); 609 if (ret) 610 goto err_endpoint_deconfig; 611 612 ipa_table_config(ipa); 613 614 /* Assign resource limitation to each group */ 615 ret = ipa_resource_config(ipa, data->resource_data); 616 if (ret) 617 goto err_table_deconfig; 618 619 ret = ipa_modem_config(ipa); 620 if (ret) 621 goto err_resource_deconfig; 622 623 return 0; 624 625 err_resource_deconfig: 626 ipa_resource_deconfig(ipa); 627 err_table_deconfig: 628 ipa_table_deconfig(ipa); 629 ipa_mem_deconfig(ipa); 630 err_endpoint_deconfig: 631 ipa_endpoint_deconfig(ipa); 632 err_hardware_deconfig: 633 ipa_hardware_deconfig(ipa); 634 ipa_clock_put(ipa); 635 636 return ret; 637 } 638 639 /** 640 * ipa_deconfig() - Inverse of ipa_config() 641 * @ipa: IPA pointer 642 */ 643 static void ipa_deconfig(struct ipa *ipa) 644 { 645 ipa_modem_deconfig(ipa); 646 ipa_resource_deconfig(ipa); 647 ipa_table_deconfig(ipa); 648 ipa_mem_deconfig(ipa); 649 ipa_endpoint_deconfig(ipa); 650 ipa_hardware_deconfig(ipa); 651 ipa_clock_put(ipa); 652 } 653 654 static int ipa_firmware_load(struct device *dev) 655 { 656 const struct firmware *fw; 657 struct device_node *node; 658 struct resource res; 659 phys_addr_t phys; 660 ssize_t size; 661 void *virt; 662 int ret; 663 664 node = of_parse_phandle(dev->of_node, "memory-region", 0); 665 if (!node) { 666 dev_err(dev, "DT error getting \"memory-region\" property\n"); 667 return -EINVAL; 668 } 669 670 ret = of_address_to_resource(node, 0, &res); 671 if (ret) { 672 dev_err(dev, "error %d getting \"memory-region\" resource\n", 673 ret); 674 return ret; 675 } 676 677 ret = request_firmware(&fw, IPA_FWS_PATH, dev); 678 if (ret) { 679 dev_err(dev, "error %d requesting \"%s\"\n", ret, IPA_FWS_PATH); 680 return ret; 681 } 682 683 phys = res.start; 684 size = (size_t)resource_size(&res); 685 virt = memremap(phys, size, MEMREMAP_WC); 686 if (!virt) { 687 dev_err(dev, "unable to remap firmware memory\n"); 688 ret = -ENOMEM; 689 goto out_release_firmware; 690 } 691 692 ret = qcom_mdt_load(dev, fw, IPA_FWS_PATH, IPA_PAS_ID, 693 virt, phys, size, NULL); 694 if (ret) 695 dev_err(dev, "error %d loading \"%s\"\n", ret, IPA_FWS_PATH); 696 else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID))) 697 dev_err(dev, "error %d authenticating \"%s\"\n", ret, 698 IPA_FWS_PATH); 699 700 memunmap(virt); 701 out_release_firmware: 702 release_firmware(fw); 703 704 return ret; 705 } 706 707 static const struct of_device_id ipa_match[] = { 708 { 709 .compatible = "qcom,sdm845-ipa", 710 .data = &ipa_data_sdm845, 711 }, 712 { 713 .compatible = "qcom,sc7180-ipa", 714 .data = &ipa_data_sc7180, 715 }, 716 { }, 717 }; 718 MODULE_DEVICE_TABLE(of, ipa_match); 719 720 /* Check things that can be validated at build time. This just 721 * groups these things BUILD_BUG_ON() calls don't clutter the rest 722 * of the code. 723 * */ 724 static void ipa_validate_build(void) 725 { 726 #ifdef IPA_VALIDATE 727 /* At one time we assumed a 64-bit build, allowing some do_div() 728 * calls to be replaced by simple division or modulo operations. 729 * We currently only perform divide and modulo operations on u32, 730 * u16, or size_t objects, and of those only size_t has any chance 731 * of being a 64-bit value. (It should be guaranteed 32 bits wide 732 * on a 32-bit build, but there is no harm in verifying that.) 733 */ 734 BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4); 735 736 /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */ 737 BUILD_BUG_ON(GSI_EE_AP != 0); 738 739 /* There's no point if we have no channels or event rings */ 740 BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX); 741 BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX); 742 743 /* GSI hardware design limits */ 744 BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32); 745 BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31); 746 747 /* The number of TREs in a transaction is limited by the channel's 748 * TLV FIFO size. A transaction structure uses 8-bit fields 749 * to represents the number of TREs it has allocated and used. 750 */ 751 BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX); 752 753 /* This is used as a divisor */ 754 BUILD_BUG_ON(!IPA_AGGR_GRANULARITY); 755 756 /* Aggregation granularity value can't be 0, and must fit */ 757 BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY)); 758 BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) > 759 field_max(AGGR_GRANULARITY_FMASK)); 760 #endif /* IPA_VALIDATE */ 761 } 762 763 /** 764 * ipa_probe() - IPA platform driver probe function 765 * @pdev: Platform device pointer 766 * 767 * Return: 0 if successful, or a negative error code (possibly 768 * EPROBE_DEFER) 769 * 770 * This is the main entry point for the IPA driver. Initialization proceeds 771 * in several stages: 772 * - The "init" stage involves activities that can be initialized without 773 * access to the IPA hardware. 774 * - The "config" stage requires the IPA clock to be active so IPA registers 775 * can be accessed, but does not require the use of IPA immediate commands. 776 * - The "setup" stage uses IPA immediate commands, and so requires the GSI 777 * layer to be initialized. 778 * 779 * A Boolean Device Tree "modem-init" property determines whether GSI 780 * initialization will be performed by the AP (Trust Zone) or the modem. 781 * If the AP does GSI initialization, the setup phase is entered after 782 * this has completed successfully. Otherwise the modem initializes 783 * the GSI layer and signals it has finished by sending an SMP2P interrupt 784 * to the AP; this triggers the start if IPA setup. 785 */ 786 static int ipa_probe(struct platform_device *pdev) 787 { 788 struct device *dev = &pdev->dev; 789 const struct ipa_data *data; 790 struct ipa_clock *clock; 791 bool modem_init; 792 struct ipa *ipa; 793 int ret; 794 795 ipa_validate_build(); 796 797 /* Get configuration data early; needed for clock initialization */ 798 data = of_device_get_match_data(dev); 799 if (!data) { 800 /* This is really IPA_VALIDATE (should never happen) */ 801 dev_err(dev, "matched hardware not supported\n"); 802 return -ENODEV; 803 } 804 805 /* If we need Trust Zone, make sure it's available */ 806 modem_init = of_property_read_bool(dev->of_node, "modem-init"); 807 if (!modem_init) 808 if (!qcom_scm_is_available()) 809 return -EPROBE_DEFER; 810 811 /* The clock and interconnects might not be ready when we're 812 * probed, so might return -EPROBE_DEFER. 813 */ 814 clock = ipa_clock_init(dev, data->clock_data); 815 if (IS_ERR(clock)) 816 return PTR_ERR(clock); 817 818 /* No more EPROBE_DEFER. Allocate and initialize the IPA structure */ 819 ipa = kzalloc(sizeof(*ipa), GFP_KERNEL); 820 if (!ipa) { 821 ret = -ENOMEM; 822 goto err_clock_exit; 823 } 824 825 ipa->pdev = pdev; 826 dev_set_drvdata(dev, ipa); 827 ipa->clock = clock; 828 ipa->version = data->version; 829 init_completion(&ipa->completion); 830 831 ret = ipa_reg_init(ipa); 832 if (ret) 833 goto err_kfree_ipa; 834 835 ret = ipa_mem_init(ipa, data->mem_data); 836 if (ret) 837 goto err_reg_exit; 838 839 ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count, 840 data->endpoint_data); 841 if (ret) 842 goto err_mem_exit; 843 844 /* Result is a non-zero mask of endpoints that support filtering */ 845 ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count, 846 data->endpoint_data); 847 if (!ipa->filter_map) { 848 ret = -EINVAL; 849 goto err_gsi_exit; 850 } 851 852 ret = ipa_table_init(ipa); 853 if (ret) 854 goto err_endpoint_exit; 855 856 ret = ipa_modem_init(ipa, modem_init); 857 if (ret) 858 goto err_table_exit; 859 860 ret = ipa_config(ipa, data); 861 if (ret) 862 goto err_modem_exit; 863 864 dev_info(dev, "IPA driver initialized"); 865 866 /* If the modem is doing early initialization, it will trigger a 867 * call to ipa_setup() call when it has finished. In that case 868 * we're done here. 869 */ 870 if (modem_init) 871 return 0; 872 873 /* Otherwise we need to load the firmware and have Trust Zone validate 874 * and install it. If that succeeds we can proceed with setup. 875 */ 876 ret = ipa_firmware_load(dev); 877 if (ret) 878 goto err_deconfig; 879 880 ret = ipa_setup(ipa); 881 if (ret) 882 goto err_deconfig; 883 884 return 0; 885 886 err_deconfig: 887 ipa_deconfig(ipa); 888 err_modem_exit: 889 ipa_modem_exit(ipa); 890 err_table_exit: 891 ipa_table_exit(ipa); 892 err_endpoint_exit: 893 ipa_endpoint_exit(ipa); 894 err_gsi_exit: 895 gsi_exit(&ipa->gsi); 896 err_mem_exit: 897 ipa_mem_exit(ipa); 898 err_reg_exit: 899 ipa_reg_exit(ipa); 900 err_kfree_ipa: 901 kfree(ipa); 902 err_clock_exit: 903 ipa_clock_exit(clock); 904 905 return ret; 906 } 907 908 static int ipa_remove(struct platform_device *pdev) 909 { 910 struct ipa *ipa = dev_get_drvdata(&pdev->dev); 911 struct ipa_clock *clock = ipa->clock; 912 int ret; 913 914 if (ipa->setup_complete) { 915 ret = ipa_modem_stop(ipa); 916 /* If starting or stopping is in progress, try once more */ 917 if (ret == -EBUSY) { 918 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); 919 ret = ipa_modem_stop(ipa); 920 } 921 if (ret) 922 return ret; 923 924 ipa_teardown(ipa); 925 } 926 927 ipa_deconfig(ipa); 928 ipa_modem_exit(ipa); 929 ipa_table_exit(ipa); 930 ipa_endpoint_exit(ipa); 931 gsi_exit(&ipa->gsi); 932 ipa_mem_exit(ipa); 933 ipa_reg_exit(ipa); 934 kfree(ipa); 935 ipa_clock_exit(clock); 936 937 return 0; 938 } 939 940 static void ipa_shutdown(struct platform_device *pdev) 941 { 942 int ret; 943 944 ret = ipa_remove(pdev); 945 if (ret) 946 dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret); 947 } 948 949 /** 950 * ipa_suspend() - Power management system suspend callback 951 * @dev: IPA device structure 952 * 953 * Return: Always returns zero 954 * 955 * Called by the PM framework when a system suspend operation is invoked. 956 * Suspends endpoints and releases the clock reference held to keep 957 * the IPA clock running until this point. 958 */ 959 static int ipa_suspend(struct device *dev) 960 { 961 struct ipa *ipa = dev_get_drvdata(dev); 962 963 /* When a suspended RX endpoint has a packet ready to receive, we 964 * get an IPA SUSPEND interrupt. We trigger a system resume in 965 * that case, but only on the first such interrupt since suspend. 966 */ 967 __clear_bit(IPA_FLAG_RESUMED, ipa->flags); 968 969 ipa_endpoint_suspend(ipa); 970 971 ipa_clock_put(ipa); 972 973 return 0; 974 } 975 976 /** 977 * ipa_resume() - Power management system resume callback 978 * @dev: IPA device structure 979 * 980 * Return: Always returns 0 981 * 982 * Called by the PM framework when a system resume operation is invoked. 983 * Takes an IPA clock reference to keep the clock running until suspend, 984 * and resumes endpoints. 985 */ 986 static int ipa_resume(struct device *dev) 987 { 988 struct ipa *ipa = dev_get_drvdata(dev); 989 990 /* This clock reference will keep the IPA out of suspend 991 * until we get a power management suspend request. 992 */ 993 ipa_clock_get(ipa); 994 995 ipa_endpoint_resume(ipa); 996 997 return 0; 998 } 999 1000 static const struct dev_pm_ops ipa_pm_ops = { 1001 .suspend = ipa_suspend, 1002 .resume = ipa_resume, 1003 }; 1004 1005 static struct platform_driver ipa_driver = { 1006 .probe = ipa_probe, 1007 .remove = ipa_remove, 1008 .shutdown = ipa_shutdown, 1009 .driver = { 1010 .name = "ipa", 1011 .pm = &ipa_pm_ops, 1012 .of_match_table = ipa_match, 1013 }, 1014 }; 1015 1016 module_platform_driver(ipa_driver); 1017 1018 MODULE_LICENSE("GPL v2"); 1019 MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver"); 1020