// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"
#include "coredump.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

/* Maximum number of bytes that can be handled atomically by
 * diag read and write.
 */
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000

#define QCA99X0_PCIE_BAR0_START_REG 0x81030
#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
#define QCA99X0_CPU_MEM_DATA_REG 0x4d010

static const struct pci_device_id ath10k_pci_id_table[] = {
	/* PCI-E QCA988X V2 (Ubiquiti branded) */
	{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },

	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
	{}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
74 */ 75 { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV }, 76 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, 77 78 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 79 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 80 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 81 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 82 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 83 84 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 85 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 87 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 88 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 89 90 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 91 92 { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV }, 93 94 { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV }, 95 96 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 97 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, 98 99 { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV }, 100 }; 101 102 static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 103 static int ath10k_pci_cold_reset(struct ath10k *ar); 104 static int ath10k_pci_safe_chip_reset(struct ath10k *ar); 105 static int ath10k_pci_init_irq(struct ath10k *ar); 106 static int ath10k_pci_deinit_irq(struct ath10k *ar); 107 static int ath10k_pci_request_irq(struct ath10k *ar); 108 static void ath10k_pci_free_irq(struct ath10k *ar); 109 static int ath10k_pci_bmi_wait(struct ath10k *ar, 110 struct ath10k_ce_pipe *tx_pipe, 111 struct ath10k_ce_pipe *rx_pipe, 112 struct bmi_xfer *xfer); 113 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); 114 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); 115 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 116 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); 117 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); 118 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 119 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); 120 121 static const struct ce_attr pci_host_ce_config_wlan[] = { 122 /* CE0: host->target HTC control and raw streams */ 123 { 124 .flags = CE_ATTR_FLAGS, 125 .src_nentries = 16, 126 .src_sz_max = 256, 127 .dest_nentries = 0, 128 .send_cb = ath10k_pci_htc_tx_cb, 129 }, 130 131 /* CE1: target->host HTT + HTC control */ 132 { 133 .flags = CE_ATTR_FLAGS, 134 .src_nentries = 0, 135 .src_sz_max = 2048, 136 .dest_nentries = 512, 137 .recv_cb = ath10k_pci_htt_htc_rx_cb, 138 }, 139 140 /* CE2: target->host WMI */ 141 { 142 .flags = CE_ATTR_FLAGS, 143 .src_nentries = 0, 144 .src_sz_max = 2048, 145 .dest_nentries = 128, 146 .recv_cb = ath10k_pci_htc_rx_cb, 147 }, 148 149 /* CE3: host->target WMI */ 150 { 151 .flags = CE_ATTR_FLAGS, 152 .src_nentries = 32, 153 .src_sz_max = 2048, 154 .dest_nentries = 0, 155 .send_cb = ath10k_pci_htc_tx_cb, 156 }, 157 158 /* CE4: host->target HTT */ 159 { 160 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 161 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, 162 .src_sz_max = 256, 163 .dest_nentries = 0, 164 .send_cb = ath10k_pci_htt_tx_cb, 165 }, 166 167 /* CE5: target->host HTT (HIF->HTT) */ 168 { 169 .flags = CE_ATTR_FLAGS, 170 .src_nentries = 0, 171 .src_sz_max = 512, 172 .dest_nentries = 512, 173 .recv_cb = ath10k_pci_htt_rx_cb, 174 }, 175 176 /* CE6: target autonomous hif_memcpy */ 177 { 178 .flags = CE_ATTR_FLAGS, 179 .src_nentries = 0, 180 .src_sz_max = 0, 181 
.dest_nentries = 0, 182 }, 183 184 /* CE7: ce_diag, the Diagnostic Window */ 185 { 186 .flags = CE_ATTR_FLAGS | CE_ATTR_POLL, 187 .src_nentries = 2, 188 .src_sz_max = DIAG_TRANSFER_LIMIT, 189 .dest_nentries = 2, 190 }, 191 192 /* CE8: target->host pktlog */ 193 { 194 .flags = CE_ATTR_FLAGS, 195 .src_nentries = 0, 196 .src_sz_max = 2048, 197 .dest_nentries = 128, 198 .recv_cb = ath10k_pci_pktlog_rx_cb, 199 }, 200 201 /* CE9 target autonomous qcache memcpy */ 202 { 203 .flags = CE_ATTR_FLAGS, 204 .src_nentries = 0, 205 .src_sz_max = 0, 206 .dest_nentries = 0, 207 }, 208 209 /* CE10: target autonomous hif memcpy */ 210 { 211 .flags = CE_ATTR_FLAGS, 212 .src_nentries = 0, 213 .src_sz_max = 0, 214 .dest_nentries = 0, 215 }, 216 217 /* CE11: target autonomous hif memcpy */ 218 { 219 .flags = CE_ATTR_FLAGS, 220 .src_nentries = 0, 221 .src_sz_max = 0, 222 .dest_nentries = 0, 223 }, 224 }; 225 226 /* Target firmware's Copy Engine configuration. */ 227 static const struct ce_pipe_config pci_target_ce_config_wlan[] = { 228 /* CE0: host->target HTC control and raw streams */ 229 { 230 .pipenum = __cpu_to_le32(0), 231 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 232 .nentries = __cpu_to_le32(32), 233 .nbytes_max = __cpu_to_le32(256), 234 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 235 .reserved = __cpu_to_le32(0), 236 }, 237 238 /* CE1: target->host HTT + HTC control */ 239 { 240 .pipenum = __cpu_to_le32(1), 241 .pipedir = __cpu_to_le32(PIPEDIR_IN), 242 .nentries = __cpu_to_le32(32), 243 .nbytes_max = __cpu_to_le32(2048), 244 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 245 .reserved = __cpu_to_le32(0), 246 }, 247 248 /* CE2: target->host WMI */ 249 { 250 .pipenum = __cpu_to_le32(2), 251 .pipedir = __cpu_to_le32(PIPEDIR_IN), 252 .nentries = __cpu_to_le32(64), 253 .nbytes_max = __cpu_to_le32(2048), 254 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 255 .reserved = __cpu_to_le32(0), 256 }, 257 258 /* CE3: host->target WMI */ 259 { 260 .pipenum = __cpu_to_le32(3), 261 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 262 .nentries = __cpu_to_le32(32), 263 .nbytes_max = __cpu_to_le32(2048), 264 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 265 .reserved = __cpu_to_le32(0), 266 }, 267 268 /* CE4: host->target HTT */ 269 { 270 .pipenum = __cpu_to_le32(4), 271 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 272 .nentries = __cpu_to_le32(256), 273 .nbytes_max = __cpu_to_le32(256), 274 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 275 .reserved = __cpu_to_le32(0), 276 }, 277 278 /* NB: 50% of src nentries, since tx has 2 frags */ 279 280 /* CE5: target->host HTT (HIF->HTT) */ 281 { 282 .pipenum = __cpu_to_le32(5), 283 .pipedir = __cpu_to_le32(PIPEDIR_IN), 284 .nentries = __cpu_to_le32(32), 285 .nbytes_max = __cpu_to_le32(512), 286 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 287 .reserved = __cpu_to_le32(0), 288 }, 289 290 /* CE6: Reserved for target autonomous hif_memcpy */ 291 { 292 .pipenum = __cpu_to_le32(6), 293 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 294 .nentries = __cpu_to_le32(32), 295 .nbytes_max = __cpu_to_le32(4096), 296 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 297 .reserved = __cpu_to_le32(0), 298 }, 299 300 /* CE7 used only by Host */ 301 { 302 .pipenum = __cpu_to_le32(7), 303 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 304 .nentries = __cpu_to_le32(0), 305 .nbytes_max = __cpu_to_le32(0), 306 .flags = __cpu_to_le32(0), 307 .reserved = __cpu_to_le32(0), 308 }, 309 310 /* CE8 target->host packtlog */ 311 { 312 .pipenum = __cpu_to_le32(8), 313 .pipedir = __cpu_to_le32(PIPEDIR_IN), 314 .nentries = __cpu_to_le32(64), 315 .nbytes_max = __cpu_to_le32(2048), 316 .flags = 
__cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 317 .reserved = __cpu_to_le32(0), 318 }, 319 320 /* CE9 target autonomous qcache memcpy */ 321 { 322 .pipenum = __cpu_to_le32(9), 323 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 324 .nentries = __cpu_to_le32(32), 325 .nbytes_max = __cpu_to_le32(2048), 326 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 327 .reserved = __cpu_to_le32(0), 328 }, 329 330 /* It not necessary to send target wlan configuration for CE10 & CE11 331 * as these CEs are not actively used in target. 332 */ 333 }; 334 335 /* 336 * Map from service/endpoint to Copy Engine. 337 * This table is derived from the CE_PCI TABLE, above. 338 * It is passed to the Target at startup for use by firmware. 339 */ 340 static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = { 341 { 342 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 343 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 344 __cpu_to_le32(3), 345 }, 346 { 347 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 348 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 349 __cpu_to_le32(2), 350 }, 351 { 352 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), 353 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 354 __cpu_to_le32(3), 355 }, 356 { 357 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), 358 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 359 __cpu_to_le32(2), 360 }, 361 { 362 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), 363 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 364 __cpu_to_le32(3), 365 }, 366 { 367 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), 368 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 369 __cpu_to_le32(2), 370 }, 371 { 372 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), 373 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 374 __cpu_to_le32(3), 375 }, 376 { 377 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), 378 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 379 __cpu_to_le32(2), 380 }, 381 { 382 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), 383 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 384 __cpu_to_le32(3), 385 }, 386 { 387 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), 388 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 389 __cpu_to_le32(2), 390 }, 391 { 392 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), 393 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 394 __cpu_to_le32(0), 395 }, 396 { 397 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), 398 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 399 __cpu_to_le32(1), 400 }, 401 { /* not used */ 402 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), 403 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 404 __cpu_to_le32(0), 405 }, 406 { /* not used */ 407 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), 408 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 409 __cpu_to_le32(1), 410 }, 411 { 412 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), 413 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 414 __cpu_to_le32(4), 415 }, 416 { 417 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), 418 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 419 __cpu_to_le32(5), 420 }, 421 422 /* (Additions here) */ 423 424 { /* must be last */ 425 __cpu_to_le32(0), 426 __cpu_to_le32(0), 427 __cpu_to_le32(0), 428 }, 429 }; 430 431 static bool ath10k_pci_is_awake(struct ath10k *ar) 432 { 433 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 434 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 435 
RTC_STATE_ADDRESS); 436 437 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; 438 } 439 440 static void __ath10k_pci_wake(struct ath10k *ar) 441 { 442 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 443 444 lockdep_assert_held(&ar_pci->ps_lock); 445 446 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n", 447 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 448 449 iowrite32(PCIE_SOC_WAKE_V_MASK, 450 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 451 PCIE_SOC_WAKE_ADDRESS); 452 } 453 454 static void __ath10k_pci_sleep(struct ath10k *ar) 455 { 456 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 457 458 lockdep_assert_held(&ar_pci->ps_lock); 459 460 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n", 461 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 462 463 iowrite32(PCIE_SOC_WAKE_RESET, 464 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 465 PCIE_SOC_WAKE_ADDRESS); 466 ar_pci->ps_awake = false; 467 } 468 469 static int ath10k_pci_wake_wait(struct ath10k *ar) 470 { 471 int tot_delay = 0; 472 int curr_delay = 5; 473 474 while (tot_delay < PCIE_WAKE_TIMEOUT) { 475 if (ath10k_pci_is_awake(ar)) { 476 if (tot_delay > PCIE_WAKE_LATE_US) 477 ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n", 478 tot_delay / 1000); 479 return 0; 480 } 481 482 udelay(curr_delay); 483 tot_delay += curr_delay; 484 485 if (curr_delay < 50) 486 curr_delay += 5; 487 } 488 489 return -ETIMEDOUT; 490 } 491 492 static int ath10k_pci_force_wake(struct ath10k *ar) 493 { 494 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 495 unsigned long flags; 496 int ret = 0; 497 498 if (ar_pci->pci_ps) 499 return ret; 500 501 spin_lock_irqsave(&ar_pci->ps_lock, flags); 502 503 if (!ar_pci->ps_awake) { 504 iowrite32(PCIE_SOC_WAKE_V_MASK, 505 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 506 PCIE_SOC_WAKE_ADDRESS); 507 508 ret = ath10k_pci_wake_wait(ar); 509 if (ret == 0) 510 ar_pci->ps_awake = true; 511 } 512 513 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 514 515 return ret; 516 } 517 518 static void ath10k_pci_force_sleep(struct ath10k *ar) 519 { 520 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 521 unsigned long flags; 522 523 spin_lock_irqsave(&ar_pci->ps_lock, flags); 524 525 iowrite32(PCIE_SOC_WAKE_RESET, 526 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 527 PCIE_SOC_WAKE_ADDRESS); 528 ar_pci->ps_awake = false; 529 530 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 531 } 532 533 static int ath10k_pci_wake(struct ath10k *ar) 534 { 535 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 536 unsigned long flags; 537 int ret = 0; 538 539 if (ar_pci->pci_ps == 0) 540 return ret; 541 542 spin_lock_irqsave(&ar_pci->ps_lock, flags); 543 544 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n", 545 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 546 547 /* This function can be called very frequently. To avoid excessive 548 * CPU stalls for MMIO reads use a cache var to hold the device state. 
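	 * ps_awake is only touched with ps_lock held. Note that
	 * ath10k_pci_sleep() below does not clear it directly: it only drops
	 * ps_wake_refcount and arms ps_timer, and the timer callback puts the
	 * device back to sleep once the refcount has reached zero.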
549 */ 550 if (!ar_pci->ps_awake) { 551 __ath10k_pci_wake(ar); 552 553 ret = ath10k_pci_wake_wait(ar); 554 if (ret == 0) 555 ar_pci->ps_awake = true; 556 } 557 558 if (ret == 0) { 559 ar_pci->ps_wake_refcount++; 560 WARN_ON(ar_pci->ps_wake_refcount == 0); 561 } 562 563 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 564 565 return ret; 566 } 567 568 static void ath10k_pci_sleep(struct ath10k *ar) 569 { 570 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 571 unsigned long flags; 572 573 if (ar_pci->pci_ps == 0) 574 return; 575 576 spin_lock_irqsave(&ar_pci->ps_lock, flags); 577 578 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", 579 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 580 581 if (WARN_ON(ar_pci->ps_wake_refcount == 0)) 582 goto skip; 583 584 ar_pci->ps_wake_refcount--; 585 586 mod_timer(&ar_pci->ps_timer, jiffies + 587 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); 588 589 skip: 590 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 591 } 592 593 static void ath10k_pci_ps_timer(struct timer_list *t) 594 { 595 struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t, ps_timer); 596 struct ath10k *ar = ar_pci->ar; 597 unsigned long flags; 598 599 spin_lock_irqsave(&ar_pci->ps_lock, flags); 600 601 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", 602 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 603 604 if (ar_pci->ps_wake_refcount > 0) 605 goto skip; 606 607 __ath10k_pci_sleep(ar); 608 609 skip: 610 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 611 } 612 613 static void ath10k_pci_sleep_sync(struct ath10k *ar) 614 { 615 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 616 unsigned long flags; 617 618 if (ar_pci->pci_ps == 0) { 619 ath10k_pci_force_sleep(ar); 620 return; 621 } 622 623 timer_delete_sync(&ar_pci->ps_timer); 624 625 spin_lock_irqsave(&ar_pci->ps_lock, flags); 626 WARN_ON(ar_pci->ps_wake_refcount > 0); 627 __ath10k_pci_sleep(ar); 628 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 629 } 630 631 static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value) 632 { 633 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 634 int ret; 635 636 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) { 637 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 638 offset, offset + sizeof(value), ar_pci->mem_len); 639 return; 640 } 641 642 ret = ath10k_pci_wake(ar); 643 if (ret) { 644 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", 645 value, offset, ret); 646 return; 647 } 648 649 iowrite32(value, ar_pci->mem + offset); 650 ath10k_pci_sleep(ar); 651 } 652 653 static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) 654 { 655 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 656 u32 val; 657 int ret; 658 659 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) { 660 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 661 offset, offset + sizeof(val), ar_pci->mem_len); 662 return 0; 663 } 664 665 ret = ath10k_pci_wake(ar); 666 if (ret) { 667 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", 668 offset, ret); 669 return 0xffffffff; 670 } 671 672 val = ioread32(ar_pci->mem + offset); 673 ath10k_pci_sleep(ar); 674 675 return val; 676 } 677 678 inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) 679 { 680 struct ath10k_ce *ce = ath10k_ce_priv(ar); 681 682 ce->bus_ops->write32(ar, offset, value); 683 } 684 685 inline u32 ath10k_pci_read32(struct ath10k *ar, u32 
offset) 686 { 687 struct ath10k_ce *ce = ath10k_ce_priv(ar); 688 689 return ce->bus_ops->read32(ar, offset); 690 } 691 692 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) 693 { 694 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); 695 } 696 697 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) 698 { 699 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); 700 } 701 702 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) 703 { 704 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); 705 } 706 707 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) 708 { 709 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); 710 } 711 712 bool ath10k_pci_irq_pending(struct ath10k *ar) 713 { 714 u32 cause; 715 716 /* Check if the shared legacy irq is for us */ 717 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 718 PCIE_INTR_CAUSE_ADDRESS); 719 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) 720 return true; 721 722 return false; 723 } 724 725 void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar) 726 { 727 /* IMPORTANT: INTR_CLR register has to be set after 728 * INTR_ENABLE is set to 0, otherwise interrupt can not be 729 * really cleared. 730 */ 731 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 732 0); 733 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, 734 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 735 736 /* IMPORTANT: this extra read transaction is required to 737 * flush the posted write buffer. 738 */ 739 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 740 PCIE_INTR_ENABLE_ADDRESS); 741 } 742 743 void ath10k_pci_enable_intx_irq(struct ath10k *ar) 744 { 745 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 746 PCIE_INTR_ENABLE_ADDRESS, 747 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 748 749 /* IMPORTANT: this extra read transaction is required to 750 * flush the posted write buffer. 
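	 * (PCIe memory writes are posted; without the read-back the enable
	 * write could still be sitting in a write buffer when the caller
	 * continues.)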
751 */ 752 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 753 PCIE_INTR_ENABLE_ADDRESS); 754 } 755 756 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) 757 { 758 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 759 760 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI) 761 return "msi"; 762 763 return "legacy"; 764 } 765 766 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) 767 { 768 struct ath10k *ar = pipe->hif_ce_state; 769 struct ath10k_ce *ce = ath10k_ce_priv(ar); 770 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 771 struct sk_buff *skb; 772 dma_addr_t paddr; 773 int ret; 774 775 skb = dev_alloc_skb(pipe->buf_sz); 776 if (!skb) 777 return -ENOMEM; 778 779 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); 780 781 paddr = dma_map_single(ar->dev, skb->data, 782 skb->len + skb_tailroom(skb), 783 DMA_FROM_DEVICE); 784 if (unlikely(dma_mapping_error(ar->dev, paddr))) { 785 ath10k_warn(ar, "failed to dma map pci rx buf\n"); 786 dev_kfree_skb_any(skb); 787 return -EIO; 788 } 789 790 ATH10K_SKB_RXCB(skb)->paddr = paddr; 791 792 spin_lock_bh(&ce->ce_lock); 793 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); 794 spin_unlock_bh(&ce->ce_lock); 795 if (ret) { 796 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), 797 DMA_FROM_DEVICE); 798 dev_kfree_skb_any(skb); 799 return ret; 800 } 801 802 return 0; 803 } 804 805 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) 806 { 807 struct ath10k *ar = pipe->hif_ce_state; 808 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 809 struct ath10k_ce *ce = ath10k_ce_priv(ar); 810 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 811 int ret, num; 812 813 if (pipe->buf_sz == 0) 814 return; 815 816 if (!ce_pipe->dest_ring) 817 return; 818 819 spin_lock_bh(&ce->ce_lock); 820 num = __ath10k_ce_rx_num_free_bufs(ce_pipe); 821 spin_unlock_bh(&ce->ce_lock); 822 823 while (num >= 0) { 824 ret = __ath10k_pci_rx_post_buf(pipe); 825 if (ret) { 826 if (ret == -ENOSPC) 827 break; 828 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); 829 mod_timer(&ar_pci->rx_post_retry, jiffies + 830 ATH10K_PCI_RX_POST_RETRY_MS); 831 break; 832 } 833 num--; 834 } 835 } 836 837 void ath10k_pci_rx_post(struct ath10k *ar) 838 { 839 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 840 int i; 841 842 for (i = 0; i < CE_COUNT; i++) 843 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); 844 } 845 846 void ath10k_pci_rx_replenish_retry(struct timer_list *t) 847 { 848 struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t, 849 rx_post_retry); 850 struct ath10k *ar = ar_pci->ar; 851 852 ath10k_pci_rx_post(ar); 853 } 854 855 static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 856 { 857 u32 val = 0, region = addr & 0xfffff; 858 859 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) 860 & 0x7ff) << 21; 861 val |= 0x100000 | region; 862 return val; 863 } 864 865 /* Refactor from ath10k_pci_qca988x_targ_cpu_to_ce_addr. 866 * Support to access target space below 1M for qca6174 and qca9377. 867 * If target space is below 1M, the bit[20] of converted CE addr is 0. 868 * Otherwise bit[20] of converted CE addr is 1. 869 */ 870 static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 871 { 872 u32 val = 0, region = addr & 0xfffff; 873 874 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) 875 & 0x7ff) << 21; 876 val |= ((addr >= 0x100000) ? 
0x100000 : 0) | region; 877 return val; 878 } 879 880 static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 881 { 882 u32 val = 0, region = addr & 0xfffff; 883 884 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); 885 val |= 0x100000 | region; 886 return val; 887 } 888 889 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 890 { 891 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 892 893 if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr)) 894 return -EOPNOTSUPP; 895 896 return ar_pci->targ_cpu_to_ce_addr(ar, addr); 897 } 898 899 /* 900 * Diagnostic read/write access is provided for startup/config/debug usage. 901 * Caller must guarantee proper alignment, when applicable, and single user 902 * at any moment. 903 */ 904 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, 905 int nbytes) 906 { 907 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 908 int ret = 0; 909 u32 *buf; 910 unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; 911 struct ath10k_ce_pipe *ce_diag; 912 /* Host buffer address in CE space */ 913 u32 ce_data; 914 dma_addr_t ce_data_base = 0; 915 void *data_buf; 916 int i; 917 918 mutex_lock(&ar_pci->ce_diag_mutex); 919 ce_diag = ar_pci->ce_diag; 920 921 /* 922 * Allocate a temporary bounce buffer to hold caller's data 923 * to be DMA'ed from Target. This guarantees 924 * 1) 4-byte alignment 925 * 2) Buffer in DMA-able space 926 */ 927 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 928 929 data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, 930 GFP_ATOMIC); 931 if (!data_buf) { 932 ret = -ENOMEM; 933 goto done; 934 } 935 936 /* The address supplied by the caller is in the 937 * Target CPU virtual address space. 938 * 939 * In order to use this address with the diagnostic CE, 940 * convert it from Target CPU virtual address space 941 * to CE address space 942 */ 943 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 944 945 remaining_bytes = nbytes; 946 ce_data = ce_data_base; 947 while (remaining_bytes) { 948 nbytes = min_t(unsigned int, remaining_bytes, 949 DIAG_TRANSFER_LIMIT); 950 951 ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data); 952 if (ret != 0) 953 goto done; 954 955 /* Request CE to send from Target(!) 
address to Host buffer */ 956 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0); 957 if (ret) 958 goto done; 959 960 i = 0; 961 while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) { 962 udelay(DIAG_ACCESS_CE_WAIT_US); 963 i += DIAG_ACCESS_CE_WAIT_US; 964 965 if (i > DIAG_ACCESS_CE_TIMEOUT_US) { 966 ret = -EBUSY; 967 goto done; 968 } 969 } 970 971 i = 0; 972 while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf, 973 &completed_nbytes) != 0) { 974 udelay(DIAG_ACCESS_CE_WAIT_US); 975 i += DIAG_ACCESS_CE_WAIT_US; 976 977 if (i > DIAG_ACCESS_CE_TIMEOUT_US) { 978 ret = -EBUSY; 979 goto done; 980 } 981 } 982 983 if (nbytes != completed_nbytes) { 984 ret = -EIO; 985 goto done; 986 } 987 988 if (*buf != ce_data) { 989 ret = -EIO; 990 goto done; 991 } 992 993 remaining_bytes -= nbytes; 994 memcpy(data, data_buf, nbytes); 995 996 address += nbytes; 997 data += nbytes; 998 } 999 1000 done: 1001 1002 if (data_buf) 1003 dma_free_coherent(ar->dev, alloc_nbytes, data_buf, 1004 ce_data_base); 1005 1006 mutex_unlock(&ar_pci->ce_diag_mutex); 1007 1008 return ret; 1009 } 1010 1011 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) 1012 { 1013 __le32 val = 0; 1014 int ret; 1015 1016 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); 1017 *value = __le32_to_cpu(val); 1018 1019 return ret; 1020 } 1021 1022 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, 1023 u32 src, u32 len) 1024 { 1025 u32 host_addr, addr; 1026 int ret; 1027 1028 host_addr = host_interest_item_address(src); 1029 1030 ret = ath10k_pci_diag_read32(ar, host_addr, &addr); 1031 if (ret != 0) { 1032 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", 1033 src, ret); 1034 return ret; 1035 } 1036 1037 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); 1038 if (ret != 0) { 1039 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", 1040 addr, len, ret); 1041 return ret; 1042 } 1043 1044 return 0; 1045 } 1046 1047 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ 1048 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) 1049 1050 int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, 1051 const void *data, int nbytes) 1052 { 1053 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1054 int ret = 0; 1055 u32 *buf; 1056 unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; 1057 struct ath10k_ce_pipe *ce_diag; 1058 void *data_buf; 1059 dma_addr_t ce_data_base = 0; 1060 int i; 1061 1062 mutex_lock(&ar_pci->ce_diag_mutex); 1063 ce_diag = ar_pci->ce_diag; 1064 1065 /* 1066 * Allocate a temporary bounce buffer to hold caller's data 1067 * to be DMA'ed to Target. This guarantees 1068 * 1) 4-byte alignment 1069 * 2) Buffer in DMA-able space 1070 */ 1071 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 1072 1073 data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, 1074 GFP_ATOMIC); 1075 if (!data_buf) { 1076 ret = -ENOMEM; 1077 goto done; 1078 } 1079 1080 /* 1081 * The address supplied by the caller is in the 1082 * Target CPU virtual address space. 
1083 * 1084 * In order to use this address with the diagnostic CE, 1085 * convert it from 1086 * Target CPU virtual address space 1087 * to 1088 * CE address space 1089 */ 1090 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 1091 1092 remaining_bytes = nbytes; 1093 while (remaining_bytes) { 1094 /* FIXME: check cast */ 1095 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); 1096 1097 /* Copy caller's data to allocated DMA buf */ 1098 memcpy(data_buf, data, nbytes); 1099 1100 /* Set up to receive directly into Target(!) address */ 1101 ret = ath10k_ce_rx_post_buf(ce_diag, &address, address); 1102 if (ret != 0) 1103 goto done; 1104 1105 /* 1106 * Request CE to send caller-supplied data that 1107 * was copied to bounce buffer to Target(!) address. 1108 */ 1109 ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0); 1110 if (ret != 0) 1111 goto done; 1112 1113 i = 0; 1114 while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) { 1115 udelay(DIAG_ACCESS_CE_WAIT_US); 1116 i += DIAG_ACCESS_CE_WAIT_US; 1117 1118 if (i > DIAG_ACCESS_CE_TIMEOUT_US) { 1119 ret = -EBUSY; 1120 goto done; 1121 } 1122 } 1123 1124 i = 0; 1125 while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf, 1126 &completed_nbytes) != 0) { 1127 udelay(DIAG_ACCESS_CE_WAIT_US); 1128 i += DIAG_ACCESS_CE_WAIT_US; 1129 1130 if (i > DIAG_ACCESS_CE_TIMEOUT_US) { 1131 ret = -EBUSY; 1132 goto done; 1133 } 1134 } 1135 1136 if (nbytes != completed_nbytes) { 1137 ret = -EIO; 1138 goto done; 1139 } 1140 1141 if (*buf != address) { 1142 ret = -EIO; 1143 goto done; 1144 } 1145 1146 remaining_bytes -= nbytes; 1147 address += nbytes; 1148 data += nbytes; 1149 } 1150 1151 done: 1152 if (data_buf) { 1153 dma_free_coherent(ar->dev, alloc_nbytes, data_buf, 1154 ce_data_base); 1155 } 1156 1157 if (ret != 0) 1158 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", 1159 address, ret); 1160 1161 mutex_unlock(&ar_pci->ce_diag_mutex); 1162 1163 return ret; 1164 } 1165 1166 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) 1167 { 1168 __le32 val = __cpu_to_le32(value); 1169 1170 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); 1171 } 1172 1173 /* Called by lower (CE) layer when a send to Target completes. 
*/ 1174 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state) 1175 { 1176 struct ath10k *ar = ce_state->ar; 1177 struct sk_buff_head list; 1178 struct sk_buff *skb; 1179 1180 __skb_queue_head_init(&list); 1181 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { 1182 /* no need to call tx completion for NULL pointers */ 1183 if (skb == NULL) 1184 continue; 1185 1186 __skb_queue_tail(&list, skb); 1187 } 1188 1189 while ((skb = __skb_dequeue(&list))) 1190 ath10k_htc_tx_completion_handler(ar, skb); 1191 } 1192 1193 static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, 1194 void (*callback)(struct ath10k *ar, 1195 struct sk_buff *skb)) 1196 { 1197 struct ath10k *ar = ce_state->ar; 1198 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1199 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 1200 struct sk_buff *skb; 1201 struct sk_buff_head list; 1202 void *transfer_context; 1203 unsigned int nbytes, max_nbytes; 1204 1205 __skb_queue_head_init(&list); 1206 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, 1207 &nbytes) == 0) { 1208 skb = transfer_context; 1209 max_nbytes = skb->len + skb_tailroom(skb); 1210 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1211 max_nbytes, DMA_FROM_DEVICE); 1212 1213 if (unlikely(max_nbytes < nbytes)) { 1214 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", 1215 nbytes, max_nbytes); 1216 dev_kfree_skb_any(skb); 1217 continue; 1218 } 1219 1220 skb_put(skb, nbytes); 1221 __skb_queue_tail(&list, skb); 1222 } 1223 1224 while ((skb = __skb_dequeue(&list))) { 1225 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", 1226 ce_state->id, skb->len); 1227 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", 1228 skb->data, skb->len); 1229 1230 callback(ar, skb); 1231 } 1232 1233 ath10k_pci_rx_post_pipe(pipe_info); 1234 } 1235 1236 static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state, 1237 void (*callback)(struct ath10k *ar, 1238 struct sk_buff *skb)) 1239 { 1240 struct ath10k *ar = ce_state->ar; 1241 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1242 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 1243 struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl; 1244 struct sk_buff *skb; 1245 struct sk_buff_head list; 1246 void *transfer_context; 1247 unsigned int nbytes, max_nbytes, nentries; 1248 int orig_len; 1249 1250 /* No need to acquire ce_lock for CE5, since this is the only place CE5 1251 * is processed other than init and deinit. Before releasing CE5 1252 * buffers, interrupts are disabled. Thus CE5 access is serialized. 
1253 */ 1254 __skb_queue_head_init(&list); 1255 while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context, 1256 &nbytes) == 0) { 1257 skb = transfer_context; 1258 max_nbytes = skb->len + skb_tailroom(skb); 1259 1260 if (unlikely(max_nbytes < nbytes)) { 1261 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", 1262 nbytes, max_nbytes); 1263 continue; 1264 } 1265 1266 dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1267 max_nbytes, DMA_FROM_DEVICE); 1268 skb_put(skb, nbytes); 1269 __skb_queue_tail(&list, skb); 1270 } 1271 1272 nentries = skb_queue_len(&list); 1273 while ((skb = __skb_dequeue(&list))) { 1274 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", 1275 ce_state->id, skb->len); 1276 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", 1277 skb->data, skb->len); 1278 1279 orig_len = skb->len; 1280 callback(ar, skb); 1281 skb_push(skb, orig_len - skb->len); 1282 skb_reset_tail_pointer(skb); 1283 skb_trim(skb, 0); 1284 1285 /*let device gain the buffer again*/ 1286 dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1287 skb->len + skb_tailroom(skb), 1288 DMA_FROM_DEVICE); 1289 } 1290 ath10k_ce_rx_update_write_idx(ce_pipe, nentries); 1291 } 1292 1293 /* Called by lower (CE) layer when data is received from the Target. */ 1294 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1295 { 1296 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1297 } 1298 1299 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1300 { 1301 /* CE4 polling needs to be done whenever CE pipe which transports 1302 * HTT Rx (target->host) is processed. 1303 */ 1304 ath10k_ce_per_engine_service(ce_state->ar, 4); 1305 1306 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1307 } 1308 1309 /* Called by lower (CE) layer when data is received from the Target. 1310 * Only 10.4 firmware uses separate CE to transfer pktlog data. 1311 */ 1312 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) 1313 { 1314 ath10k_pci_process_rx_cb(ce_state, 1315 ath10k_htt_rx_pktlog_completion_handler); 1316 } 1317 1318 /* Called by lower (CE) layer when a send to HTT Target completes. */ 1319 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) 1320 { 1321 struct ath10k *ar = ce_state->ar; 1322 struct sk_buff *skb; 1323 1324 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { 1325 /* no need to call tx completion for NULL pointers */ 1326 if (!skb) 1327 continue; 1328 1329 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 1330 skb->len, DMA_TO_DEVICE); 1331 ath10k_htt_hif_tx_complete(ar, skb); 1332 } 1333 } 1334 1335 static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) 1336 { 1337 skb_pull(skb, sizeof(struct ath10k_htc_hdr)); 1338 ath10k_htt_t2h_msg_handler(ar, skb); 1339 } 1340 1341 /* Called by lower (CE) layer when HTT data is received from the Target. */ 1342 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) 1343 { 1344 /* CE4 polling needs to be done whenever CE pipe which transports 1345 * HTT Rx (target->host) is processed. 
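	 * CE4 (host->target HTT) has its interrupt disabled (CE_ATTR_DIS_INTR
	 * in pci_host_ce_config_wlan), so servicing it here is what reaps its
	 * pending HTT tx completions.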
1346 */ 1347 ath10k_ce_per_engine_service(ce_state->ar, 4); 1348 1349 ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); 1350 } 1351 1352 int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1353 struct ath10k_hif_sg_item *items, int n_items) 1354 { 1355 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1356 struct ath10k_ce *ce = ath10k_ce_priv(ar); 1357 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; 1358 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; 1359 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; 1360 unsigned int nentries_mask; 1361 unsigned int sw_index; 1362 unsigned int write_index; 1363 int err, i = 0; 1364 1365 spin_lock_bh(&ce->ce_lock); 1366 1367 nentries_mask = src_ring->nentries_mask; 1368 sw_index = src_ring->sw_index; 1369 write_index = src_ring->write_index; 1370 1371 if (unlikely(CE_RING_DELTA(nentries_mask, 1372 write_index, sw_index - 1) < n_items)) { 1373 err = -ENOBUFS; 1374 goto err; 1375 } 1376 1377 for (i = 0; i < n_items - 1; i++) { 1378 ath10k_dbg(ar, ATH10K_DBG_PCI, 1379 "pci tx item %d paddr %pad len %d n_items %d\n", 1380 i, &items[i].paddr, items[i].len, n_items); 1381 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1382 items[i].vaddr, items[i].len); 1383 1384 err = ath10k_ce_send_nolock(ce_pipe, 1385 items[i].transfer_context, 1386 items[i].paddr, 1387 items[i].len, 1388 items[i].transfer_id, 1389 CE_SEND_FLAG_GATHER); 1390 if (err) 1391 goto err; 1392 } 1393 1394 /* `i` is equal to `n_items -1` after for() */ 1395 1396 ath10k_dbg(ar, ATH10K_DBG_PCI, 1397 "pci tx item %d paddr %pad len %d n_items %d\n", 1398 i, &items[i].paddr, items[i].len, n_items); 1399 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1400 items[i].vaddr, items[i].len); 1401 1402 err = ath10k_ce_send_nolock(ce_pipe, 1403 items[i].transfer_context, 1404 items[i].paddr, 1405 items[i].len, 1406 items[i].transfer_id, 1407 0); 1408 if (err) 1409 goto err; 1410 1411 spin_unlock_bh(&ce->ce_lock); 1412 return 0; 1413 1414 err: 1415 for (; i > 0; i--) 1416 __ath10k_ce_send_revert(ce_pipe); 1417 1418 spin_unlock_bh(&ce->ce_lock); 1419 return err; 1420 } 1421 1422 int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1423 size_t buf_len) 1424 { 1425 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); 1426 } 1427 1428 u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 1429 { 1430 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1431 1432 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); 1433 1434 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); 1435 } 1436 1437 static void ath10k_pci_dump_registers(struct ath10k *ar, 1438 struct ath10k_fw_crash_data *crash_data) 1439 { 1440 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; 1441 int i, ret; 1442 1443 lockdep_assert_held(&ar->dump_mutex); 1444 1445 ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], 1446 hi_failure_state, 1447 REG_DUMP_COUNT_QCA988X * sizeof(__le32)); 1448 if (ret) { 1449 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); 1450 return; 1451 } 1452 1453 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); 1454 1455 ath10k_err(ar, "firmware register dump:\n"); 1456 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) 1457 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", 1458 i, 1459 __le32_to_cpu(reg_dump_values[i]), 1460 __le32_to_cpu(reg_dump_values[i + 1]), 1461 __le32_to_cpu(reg_dump_values[i + 2]), 1462 __le32_to_cpu(reg_dump_values[i + 3])); 1463 1464 if 
(!crash_data) 1465 return; 1466 1467 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) 1468 crash_data->registers[i] = reg_dump_values[i]; 1469 } 1470 1471 static int ath10k_pci_dump_memory_section(struct ath10k *ar, 1472 const struct ath10k_mem_region *mem_region, 1473 u8 *buf, size_t buf_len) 1474 { 1475 const struct ath10k_mem_section *cur_section, *next_section; 1476 unsigned int count, section_size, skip_size; 1477 int ret, i, j; 1478 1479 if (!mem_region || !buf) 1480 return 0; 1481 1482 cur_section = &mem_region->section_table.sections[0]; 1483 1484 if (mem_region->start > cur_section->start) { 1485 ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n", 1486 mem_region->start, cur_section->start); 1487 return 0; 1488 } 1489 1490 skip_size = cur_section->start - mem_region->start; 1491 1492 /* fill the gap between the first register section and register 1493 * start address 1494 */ 1495 for (i = 0; i < skip_size; i++) { 1496 *buf = ATH10K_MAGIC_NOT_COPIED; 1497 buf++; 1498 } 1499 1500 count = 0; 1501 1502 for (i = 0; cur_section != NULL; i++) { 1503 section_size = cur_section->end - cur_section->start; 1504 1505 if (section_size <= 0) { 1506 ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n", 1507 cur_section->start, 1508 cur_section->end); 1509 break; 1510 } 1511 1512 if ((i + 1) == mem_region->section_table.size) { 1513 /* last section */ 1514 next_section = NULL; 1515 skip_size = 0; 1516 } else { 1517 next_section = cur_section + 1; 1518 1519 if (cur_section->end > next_section->start) { 1520 ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n", 1521 next_section->start, 1522 cur_section->end); 1523 break; 1524 } 1525 1526 skip_size = next_section->start - cur_section->end; 1527 } 1528 1529 if (buf_len < (skip_size + section_size)) { 1530 ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len); 1531 break; 1532 } 1533 1534 buf_len -= skip_size + section_size; 1535 1536 /* read section to dest memory */ 1537 ret = ath10k_pci_diag_read_mem(ar, cur_section->start, 1538 buf, section_size); 1539 if (ret) { 1540 ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n", 1541 cur_section->start, ret); 1542 break; 1543 } 1544 1545 buf += section_size; 1546 count += section_size; 1547 1548 /* fill in the gap between this section and the next */ 1549 for (j = 0; j < skip_size; j++) { 1550 *buf = ATH10K_MAGIC_NOT_COPIED; 1551 buf++; 1552 } 1553 1554 count += skip_size; 1555 1556 if (!next_section) 1557 /* this was the last section */ 1558 break; 1559 1560 cur_section = next_section; 1561 } 1562 1563 return count; 1564 } 1565 1566 static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config) 1567 { 1568 u32 val; 1569 1570 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1571 FW_RAM_CONFIG_ADDRESS, config); 1572 1573 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1574 FW_RAM_CONFIG_ADDRESS); 1575 if (val != config) { 1576 ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n", 1577 val, config); 1578 return -EIO; 1579 } 1580 1581 return 0; 1582 } 1583 1584 /* Always returns the length */ 1585 static int ath10k_pci_dump_memory_sram(struct ath10k *ar, 1586 const struct ath10k_mem_region *region, 1587 u8 *buf) 1588 { 1589 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1590 u32 base_addr, i; 1591 1592 base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG); 1593 base_addr += region->start; 1594 1595 for (i = 0; i < region->len; i += 4) { 1596 
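		/* Indirect access: write the SRAM address into the CPU memory
		 * address window register, then read the word back from the
		 * data register.
		 */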
iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG); 1597 *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG); 1598 } 1599 1600 return region->len; 1601 } 1602 1603 /* if an error happened returns < 0, otherwise the length */ 1604 static int ath10k_pci_dump_memory_reg(struct ath10k *ar, 1605 const struct ath10k_mem_region *region, 1606 u8 *buf) 1607 { 1608 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1609 u32 i; 1610 int ret; 1611 1612 mutex_lock(&ar->conf_mutex); 1613 if (ar->state != ATH10K_STATE_ON) { 1614 ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n"); 1615 ret = -EIO; 1616 goto done; 1617 } 1618 1619 for (i = 0; i < region->len; i += 4) 1620 *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i); 1621 1622 ret = region->len; 1623 done: 1624 mutex_unlock(&ar->conf_mutex); 1625 return ret; 1626 } 1627 1628 /* if an error happened returns < 0, otherwise the length */ 1629 static int ath10k_pci_dump_memory_generic(struct ath10k *ar, 1630 const struct ath10k_mem_region *current_region, 1631 u8 *buf) 1632 { 1633 int ret; 1634 1635 if (current_region->section_table.size > 0) 1636 /* Copy each section individually. */ 1637 return ath10k_pci_dump_memory_section(ar, 1638 current_region, 1639 buf, 1640 current_region->len); 1641 1642 /* No individual memory sections defined so we can 1643 * copy the entire memory region. 1644 */ 1645 ret = ath10k_pci_diag_read_mem(ar, 1646 current_region->start, 1647 buf, 1648 current_region->len); 1649 if (ret) { 1650 ath10k_warn(ar, "failed to copy ramdump region %s: %d\n", 1651 current_region->name, ret); 1652 return ret; 1653 } 1654 1655 return current_region->len; 1656 } 1657 1658 static void ath10k_pci_dump_memory(struct ath10k *ar, 1659 struct ath10k_fw_crash_data *crash_data) 1660 { 1661 const struct ath10k_hw_mem_layout *mem_layout; 1662 const struct ath10k_mem_region *current_region; 1663 struct ath10k_dump_ram_data_hdr *hdr; 1664 u32 count, shift; 1665 size_t buf_len; 1666 int ret, i; 1667 u8 *buf; 1668 1669 lockdep_assert_held(&ar->dump_mutex); 1670 1671 if (!crash_data) 1672 return; 1673 1674 mem_layout = ath10k_coredump_get_mem_layout(ar); 1675 if (!mem_layout) 1676 return; 1677 1678 current_region = &mem_layout->region_table.regions[0]; 1679 1680 buf = crash_data->ramdump_buf; 1681 buf_len = crash_data->ramdump_buf_len; 1682 1683 memset(buf, 0, buf_len); 1684 1685 for (i = 0; i < mem_layout->region_table.size; i++) { 1686 count = 0; 1687 1688 if (current_region->len > buf_len) { 1689 ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n", 1690 current_region->name, 1691 current_region->len, 1692 buf_len); 1693 break; 1694 } 1695 1696 /* To get IRAM dump, the host driver needs to switch target 1697 * ram config from DRAM to IRAM. 1698 */ 1699 if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 || 1700 current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) { 1701 shift = current_region->start >> 20; 1702 1703 ret = ath10k_pci_set_ram_config(ar, shift); 1704 if (ret) { 1705 ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n", 1706 current_region->name, ret); 1707 break; 1708 } 1709 } 1710 1711 /* Reserve space for the header. 
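	 * The header fields themselves are filled in after the region has
	 * been dumped, so that hdr->length holds the number of bytes actually
	 * copied.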
*/ 1712 hdr = (void *)buf; 1713 buf += sizeof(*hdr); 1714 buf_len -= sizeof(*hdr); 1715 1716 switch (current_region->type) { 1717 case ATH10K_MEM_REGION_TYPE_IOSRAM: 1718 count = ath10k_pci_dump_memory_sram(ar, current_region, buf); 1719 break; 1720 case ATH10K_MEM_REGION_TYPE_IOREG: 1721 ret = ath10k_pci_dump_memory_reg(ar, current_region, buf); 1722 if (ret < 0) 1723 break; 1724 1725 count = ret; 1726 break; 1727 default: 1728 ret = ath10k_pci_dump_memory_generic(ar, current_region, buf); 1729 if (ret < 0) 1730 break; 1731 1732 count = ret; 1733 break; 1734 } 1735 1736 hdr->region_type = cpu_to_le32(current_region->type); 1737 hdr->start = cpu_to_le32(current_region->start); 1738 hdr->length = cpu_to_le32(count); 1739 1740 if (count == 0) 1741 /* Note: the header remains, just with zero length. */ 1742 break; 1743 1744 buf += count; 1745 buf_len -= count; 1746 1747 current_region++; 1748 } 1749 } 1750 1751 static void ath10k_pci_fw_dump_work(struct work_struct *work) 1752 { 1753 struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci, 1754 dump_work); 1755 struct ath10k_fw_crash_data *crash_data; 1756 struct ath10k *ar = ar_pci->ar; 1757 char guid[UUID_STRING_LEN + 1]; 1758 1759 mutex_lock(&ar->dump_mutex); 1760 1761 spin_lock_bh(&ar->data_lock); 1762 ar->stats.fw_crash_counter++; 1763 spin_unlock_bh(&ar->data_lock); 1764 1765 crash_data = ath10k_coredump_new(ar); 1766 1767 if (crash_data) 1768 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); 1769 else 1770 scnprintf(guid, sizeof(guid), "n/a"); 1771 1772 ath10k_err(ar, "firmware crashed! (guid %s)\n", guid); 1773 ath10k_print_driver_info(ar); 1774 ath10k_pci_dump_registers(ar, crash_data); 1775 ath10k_ce_dump_registers(ar, crash_data); 1776 ath10k_pci_dump_memory(ar, crash_data); 1777 1778 mutex_unlock(&ar->dump_mutex); 1779 1780 ath10k_core_start_recovery(ar); 1781 } 1782 1783 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) 1784 { 1785 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1786 1787 queue_work(ar->workqueue, &ar_pci->dump_work); 1788 } 1789 1790 void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 1791 int force) 1792 { 1793 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1794 1795 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); 1796 1797 if (!force) { 1798 int resources; 1799 /* 1800 * Decide whether to actually poll for completions, or just 1801 * wait for a later chance. 1802 * If there seem to be plenty of resources left, then just wait 1803 * since checking involves reading a CE register, which is a 1804 * relatively expensive operation. 1805 */ 1806 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); 1807 1808 /* 1809 * If at least 50% of the total resources are still available, 1810 * don't bother checking again yet. 
1811 */ 1812 if (resources > (ar_pci->attr[pipe].src_nentries >> 1)) 1813 return; 1814 } 1815 ath10k_ce_per_engine_service(ar, pipe); 1816 } 1817 1818 static void ath10k_pci_rx_retry_sync(struct ath10k *ar) 1819 { 1820 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1821 1822 timer_delete_sync(&ar_pci->rx_post_retry); 1823 } 1824 1825 int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, 1826 u8 *ul_pipe, u8 *dl_pipe) 1827 { 1828 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1829 const struct ce_service_to_pipe *entry; 1830 bool ul_set = false, dl_set = false; 1831 int i; 1832 1833 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); 1834 1835 for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) { 1836 entry = &ar_pci->serv_to_pipe[i]; 1837 1838 if (__le32_to_cpu(entry->service_id) != service_id) 1839 continue; 1840 1841 switch (__le32_to_cpu(entry->pipedir)) { 1842 case PIPEDIR_NONE: 1843 break; 1844 case PIPEDIR_IN: 1845 WARN_ON(dl_set); 1846 *dl_pipe = __le32_to_cpu(entry->pipenum); 1847 dl_set = true; 1848 break; 1849 case PIPEDIR_OUT: 1850 WARN_ON(ul_set); 1851 *ul_pipe = __le32_to_cpu(entry->pipenum); 1852 ul_set = true; 1853 break; 1854 case PIPEDIR_INOUT: 1855 WARN_ON(dl_set); 1856 WARN_ON(ul_set); 1857 *dl_pipe = __le32_to_cpu(entry->pipenum); 1858 *ul_pipe = __le32_to_cpu(entry->pipenum); 1859 dl_set = true; 1860 ul_set = true; 1861 break; 1862 } 1863 } 1864 1865 if (!ul_set || !dl_set) 1866 return -ENOENT; 1867 1868 return 0; 1869 } 1870 1871 void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, 1872 u8 *ul_pipe, u8 *dl_pipe) 1873 { 1874 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); 1875 1876 (void)ath10k_pci_hif_map_service_to_pipe(ar, 1877 ATH10K_HTC_SVC_ID_RSVD_CTRL, 1878 ul_pipe, dl_pipe); 1879 } 1880 1881 void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) 1882 { 1883 u32 val; 1884 1885 switch (ar->hw_rev) { 1886 case ATH10K_HW_QCA988X: 1887 case ATH10K_HW_QCA9887: 1888 case ATH10K_HW_QCA6174: 1889 case ATH10K_HW_QCA9377: 1890 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1891 CORE_CTRL_ADDRESS); 1892 val &= ~CORE_CTRL_PCIE_REG_31_MASK; 1893 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1894 CORE_CTRL_ADDRESS, val); 1895 break; 1896 case ATH10K_HW_QCA99X0: 1897 case ATH10K_HW_QCA9984: 1898 case ATH10K_HW_QCA9888: 1899 case ATH10K_HW_QCA4019: 1900 /* TODO: Find appropriate register configuration for QCA99X0 1901 * to mask irq/MSI. 1902 */ 1903 break; 1904 case ATH10K_HW_WCN3990: 1905 break; 1906 } 1907 } 1908 1909 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) 1910 { 1911 u32 val; 1912 1913 switch (ar->hw_rev) { 1914 case ATH10K_HW_QCA988X: 1915 case ATH10K_HW_QCA9887: 1916 case ATH10K_HW_QCA6174: 1917 case ATH10K_HW_QCA9377: 1918 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1919 CORE_CTRL_ADDRESS); 1920 val |= CORE_CTRL_PCIE_REG_31_MASK; 1921 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1922 CORE_CTRL_ADDRESS, val); 1923 break; 1924 case ATH10K_HW_QCA99X0: 1925 case ATH10K_HW_QCA9984: 1926 case ATH10K_HW_QCA9888: 1927 case ATH10K_HW_QCA4019: 1928 /* TODO: Find appropriate register configuration for QCA99X0 1929 * to unmask irq/MSI. 
1930 */ 1931 break; 1932 case ATH10K_HW_WCN3990: 1933 break; 1934 } 1935 } 1936 1937 static void ath10k_pci_irq_disable(struct ath10k *ar) 1938 { 1939 ath10k_ce_disable_interrupts(ar); 1940 ath10k_pci_disable_and_clear_intx_irq(ar); 1941 ath10k_pci_irq_msi_fw_mask(ar); 1942 } 1943 1944 static void ath10k_pci_irq_sync(struct ath10k *ar) 1945 { 1946 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1947 1948 synchronize_irq(ar_pci->pdev->irq); 1949 } 1950 1951 static void ath10k_pci_irq_enable(struct ath10k *ar) 1952 { 1953 ath10k_ce_enable_interrupts(ar); 1954 ath10k_pci_enable_intx_irq(ar); 1955 ath10k_pci_irq_msi_fw_unmask(ar); 1956 } 1957 1958 static int ath10k_pci_hif_start(struct ath10k *ar) 1959 { 1960 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1961 1962 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); 1963 1964 ath10k_core_napi_enable(ar); 1965 1966 ath10k_pci_irq_enable(ar); 1967 ath10k_pci_rx_post(ar); 1968 1969 pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL, 1970 PCI_EXP_LNKCTL_ASPMC, 1971 ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC); 1972 1973 return 0; 1974 } 1975 1976 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 1977 { 1978 struct ath10k *ar; 1979 struct ath10k_ce_pipe *ce_pipe; 1980 struct ath10k_ce_ring *ce_ring; 1981 struct sk_buff *skb; 1982 int i; 1983 1984 ar = pci_pipe->hif_ce_state; 1985 ce_pipe = pci_pipe->ce_hdl; 1986 ce_ring = ce_pipe->dest_ring; 1987 1988 if (!ce_ring) 1989 return; 1990 1991 if (!pci_pipe->buf_sz) 1992 return; 1993 1994 for (i = 0; i < ce_ring->nentries; i++) { 1995 skb = ce_ring->per_transfer_context[i]; 1996 if (!skb) 1997 continue; 1998 1999 ce_ring->per_transfer_context[i] = NULL; 2000 2001 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 2002 skb->len + skb_tailroom(skb), 2003 DMA_FROM_DEVICE); 2004 dev_kfree_skb_any(skb); 2005 } 2006 } 2007 2008 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 2009 { 2010 struct ath10k *ar; 2011 struct ath10k_ce_pipe *ce_pipe; 2012 struct ath10k_ce_ring *ce_ring; 2013 struct sk_buff *skb; 2014 int i; 2015 2016 ar = pci_pipe->hif_ce_state; 2017 ce_pipe = pci_pipe->ce_hdl; 2018 ce_ring = ce_pipe->src_ring; 2019 2020 if (!ce_ring) 2021 return; 2022 2023 if (!pci_pipe->buf_sz) 2024 return; 2025 2026 for (i = 0; i < ce_ring->nentries; i++) { 2027 skb = ce_ring->per_transfer_context[i]; 2028 if (!skb) 2029 continue; 2030 2031 ce_ring->per_transfer_context[i] = NULL; 2032 2033 ath10k_htc_tx_completion_handler(ar, skb); 2034 } 2035 } 2036 2037 /* 2038 * Cleanup residual buffers for device shutdown: 2039 * buffers that were enqueued for receive 2040 * buffers that were to be sent 2041 * Note: Buffers that had completed but which were 2042 * not yet processed are on a completion queue. They 2043 * are handled when the completion thread shuts down. 
2044 */ 2045 static void ath10k_pci_buffer_cleanup(struct ath10k *ar) 2046 { 2047 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2048 int pipe_num; 2049 2050 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 2051 struct ath10k_pci_pipe *pipe_info; 2052 2053 pipe_info = &ar_pci->pipe_info[pipe_num]; 2054 ath10k_pci_rx_pipe_cleanup(pipe_info); 2055 ath10k_pci_tx_pipe_cleanup(pipe_info); 2056 } 2057 } 2058 2059 void ath10k_pci_ce_deinit(struct ath10k *ar) 2060 { 2061 int i; 2062 2063 for (i = 0; i < CE_COUNT; i++) 2064 ath10k_ce_deinit_pipe(ar, i); 2065 } 2066 2067 void ath10k_pci_flush(struct ath10k *ar) 2068 { 2069 ath10k_pci_rx_retry_sync(ar); 2070 ath10k_pci_buffer_cleanup(ar); 2071 } 2072 2073 static void ath10k_pci_hif_stop(struct ath10k *ar) 2074 { 2075 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2076 unsigned long flags; 2077 2078 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); 2079 2080 ath10k_pci_irq_disable(ar); 2081 ath10k_pci_irq_sync(ar); 2082 2083 ath10k_core_napi_sync_disable(ar); 2084 2085 cancel_work_sync(&ar_pci->dump_work); 2086 2087 /* Most likely the device has HTT Rx ring configured. The only way to 2088 * prevent the device from accessing (and possibly corrupting) host 2089 * memory is to reset the chip now. 2090 * 2091 * There's also no known way of masking MSI interrupts on the device. 2092 * For ranged MSI the CE-related interrupts can be masked. However, 2093 * regardless of how many MSI interrupts are assigned the first one 2094 * is always used for firmware indications (crashes) and cannot be 2095 * masked. To prevent the device from asserting the interrupt, reset it 2096 * before proceeding with cleanup. 2097 */ 2098 ath10k_pci_safe_chip_reset(ar); 2099 2100 ath10k_pci_flush(ar); 2101 2102 spin_lock_irqsave(&ar_pci->ps_lock, flags); 2103 WARN_ON(ar_pci->ps_wake_refcount > 0); 2104 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 2105 } 2106 2107 int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, 2108 void *req, u32 req_len, 2109 void *resp, u32 *resp_len) 2110 { 2111 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2112 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; 2113 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; 2114 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; 2115 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; 2116 dma_addr_t req_paddr = 0; 2117 dma_addr_t resp_paddr = 0; 2118 struct bmi_xfer xfer = {}; 2119 void *treq, *tresp = NULL; 2120 int ret = 0; 2121 2122 might_sleep(); 2123 2124 if (resp && !resp_len) 2125 return -EINVAL; 2126 2127 if (resp && resp_len && *resp_len == 0) 2128 return -EINVAL; 2129 2130 treq = kmemdup(req, req_len, GFP_KERNEL); 2131 if (!treq) 2132 return -ENOMEM; 2133 2134 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); 2135 ret = dma_mapping_error(ar->dev, req_paddr); 2136 if (ret) { 2137 ret = -EIO; 2138 goto err_dma; 2139 } 2140 2141 if (resp && resp_len) { 2142 tresp = kzalloc(*resp_len, GFP_KERNEL); 2143 if (!tresp) { 2144 ret = -ENOMEM; 2145 goto err_req; 2146 } 2147 2148 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, 2149 DMA_FROM_DEVICE); 2150 ret = dma_mapping_error(ar->dev, resp_paddr); 2151 if (ret) { 2152 ret = -EIO; 2153 goto err_req; 2154 } 2155 2156 xfer.wait_for_resp = true; 2157 xfer.resp_len = 0; 2158 2159 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr); 2160 } 2161 2162 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); 2163 if (ret) 2164 goto err_resp; 2165 2166 ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx,
&xfer); 2167 if (ret) { 2168 dma_addr_t unused_buffer; 2169 unsigned int unused_nbytes; 2170 unsigned int unused_id; 2171 2172 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, 2173 &unused_nbytes, &unused_id); 2174 } else { 2175 /* non-zero means we did not time out */ 2176 ret = 0; 2177 } 2178 2179 err_resp: 2180 if (resp) { 2181 dma_addr_t unused_buffer; 2182 2183 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); 2184 dma_unmap_single(ar->dev, resp_paddr, 2185 *resp_len, DMA_FROM_DEVICE); 2186 } 2187 err_req: 2188 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); 2189 2190 if (ret == 0 && resp_len) { 2191 *resp_len = min(*resp_len, xfer.resp_len); 2192 memcpy(resp, tresp, *resp_len); 2193 } 2194 err_dma: 2195 kfree(treq); 2196 kfree(tresp); 2197 2198 return ret; 2199 } 2200 2201 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) 2202 { 2203 struct bmi_xfer *xfer; 2204 2205 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer)) 2206 return; 2207 2208 xfer->tx_done = true; 2209 } 2210 2211 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) 2212 { 2213 struct ath10k *ar = ce_state->ar; 2214 struct bmi_xfer *xfer; 2215 unsigned int nbytes; 2216 2217 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, 2218 &nbytes)) 2219 return; 2220 2221 if (WARN_ON_ONCE(!xfer)) 2222 return; 2223 2224 if (!xfer->wait_for_resp) { 2225 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); 2226 return; 2227 } 2228 2229 xfer->resp_len = nbytes; 2230 xfer->rx_done = true; 2231 } 2232 2233 static int ath10k_pci_bmi_wait(struct ath10k *ar, 2234 struct ath10k_ce_pipe *tx_pipe, 2235 struct ath10k_ce_pipe *rx_pipe, 2236 struct bmi_xfer *xfer) 2237 { 2238 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; 2239 unsigned long started = jiffies; 2240 unsigned long dur; 2241 int ret; 2242 2243 while (time_before_eq(jiffies, timeout)) { 2244 ath10k_pci_bmi_send_done(tx_pipe); 2245 ath10k_pci_bmi_recv_data(rx_pipe); 2246 2247 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) { 2248 ret = 0; 2249 goto out; 2250 } 2251 2252 schedule(); 2253 } 2254 2255 ret = -ETIMEDOUT; 2256 2257 out: 2258 dur = jiffies - started; 2259 if (dur > HZ) 2260 ath10k_dbg(ar, ATH10K_DBG_BMI, 2261 "bmi cmd took %lu jiffies hz %d ret %d\n", 2262 dur, HZ, ret); 2263 return ret; 2264 } 2265 2266 /* 2267 * Send an interrupt to the device to wake up the Target CPU 2268 * so it has an opportunity to notice any changed state. 
2269 */ 2270 static int ath10k_pci_wake_target_cpu(struct ath10k *ar) 2271 { 2272 u32 addr, val; 2273 2274 addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS; 2275 val = ath10k_pci_read32(ar, addr); 2276 val |= CORE_CTRL_CPU_INTR_MASK; 2277 ath10k_pci_write32(ar, addr, val); 2278 2279 return 0; 2280 } 2281 2282 static int ath10k_pci_get_num_banks(struct ath10k *ar) 2283 { 2284 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2285 2286 switch (ar_pci->pdev->device) { 2287 case QCA988X_2_0_DEVICE_ID_UBNT: 2288 case QCA988X_2_0_DEVICE_ID: 2289 case QCA99X0_2_0_DEVICE_ID: 2290 case QCA9888_2_0_DEVICE_ID: 2291 case QCA9984_1_0_DEVICE_ID: 2292 case QCA9887_1_0_DEVICE_ID: 2293 return 1; 2294 case QCA6164_2_1_DEVICE_ID: 2295 case QCA6174_2_1_DEVICE_ID: 2296 switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) { 2297 case QCA6174_HW_1_0_CHIP_ID_REV: 2298 case QCA6174_HW_1_1_CHIP_ID_REV: 2299 case QCA6174_HW_2_1_CHIP_ID_REV: 2300 case QCA6174_HW_2_2_CHIP_ID_REV: 2301 return 3; 2302 case QCA6174_HW_1_3_CHIP_ID_REV: 2303 return 2; 2304 case QCA6174_HW_3_0_CHIP_ID_REV: 2305 case QCA6174_HW_3_1_CHIP_ID_REV: 2306 case QCA6174_HW_3_2_CHIP_ID_REV: 2307 return 9; 2308 } 2309 break; 2310 case QCA9377_1_0_DEVICE_ID: 2311 return 9; 2312 } 2313 2314 ath10k_warn(ar, "unknown number of banks, assuming 1\n"); 2315 return 1; 2316 } 2317 2318 static int ath10k_bus_get_num_banks(struct ath10k *ar) 2319 { 2320 struct ath10k_ce *ce = ath10k_ce_priv(ar); 2321 2322 return ce->bus_ops->get_num_banks(ar); 2323 } 2324 2325 int ath10k_pci_init_config(struct ath10k *ar) 2326 { 2327 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2328 u32 interconnect_targ_addr; 2329 u32 pcie_state_targ_addr = 0; 2330 u32 pipe_cfg_targ_addr = 0; 2331 u32 svc_to_pipe_map = 0; 2332 u32 pcie_config_flags = 0; 2333 u32 ealloc_value; 2334 u32 ealloc_targ_addr; 2335 u32 flag2_value; 2336 u32 flag2_targ_addr; 2337 int ret = 0; 2338 2339 /* Download to Target the CE Config and the service-to-CE map */ 2340 interconnect_targ_addr = 2341 host_interest_item_address(HI_ITEM(hi_interconnect_state)); 2342 2343 /* Supply Target-side CE configuration */ 2344 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, 2345 &pcie_state_targ_addr); 2346 if (ret != 0) { 2347 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); 2348 return ret; 2349 } 2350 2351 if (pcie_state_targ_addr == 0) { 2352 ret = -EIO; 2353 ath10k_err(ar, "Invalid pcie state addr\n"); 2354 return ret; 2355 } 2356 2357 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 2358 offsetof(struct pcie_state, 2359 pipe_cfg_addr)), 2360 &pipe_cfg_targ_addr); 2361 if (ret != 0) { 2362 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); 2363 return ret; 2364 } 2365 2366 if (pipe_cfg_targ_addr == 0) { 2367 ret = -EIO; 2368 ath10k_err(ar, "Invalid pipe cfg addr\n"); 2369 return ret; 2370 } 2371 2372 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, 2373 ar_pci->pipe_config, 2374 sizeof(struct ce_pipe_config) * 2375 NUM_TARGET_CE_CONFIG_WLAN); 2376 2377 if (ret != 0) { 2378 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); 2379 return ret; 2380 } 2381 2382 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 2383 offsetof(struct pcie_state, 2384 svc_to_pipe_map)), 2385 &svc_to_pipe_map); 2386 if (ret != 0) { 2387 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); 2388 return ret; 2389 } 2390 2391 if (svc_to_pipe_map == 0) { 2392 ret = -EIO; 2393 ath10k_err(ar, "Invalid svc_to_pipe map\n"); 2394 return ret; 2395 } 2396 2397 ret = ath10k_pci_diag_write_mem(ar, 
svc_to_pipe_map, 2398 ar_pci->serv_to_pipe, 2399 sizeof(pci_target_service_to_ce_map_wlan)); 2400 if (ret != 0) { 2401 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); 2402 return ret; 2403 } 2404 2405 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 2406 offsetof(struct pcie_state, 2407 config_flags)), 2408 &pcie_config_flags); 2409 if (ret != 0) { 2410 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret); 2411 return ret; 2412 } 2413 2414 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; 2415 2416 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr + 2417 offsetof(struct pcie_state, 2418 config_flags)), 2419 pcie_config_flags); 2420 if (ret != 0) { 2421 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); 2422 return ret; 2423 } 2424 2425 /* configure early allocation */ 2426 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); 2427 2428 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value); 2429 if (ret != 0) { 2430 ath10k_err(ar, "Failed to get early alloc val: %d\n", ret); 2431 return ret; 2432 } 2433 2434 /* first bank is switched to IRAM */ 2435 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & 2436 HI_EARLY_ALLOC_MAGIC_MASK); 2437 ealloc_value |= ((ath10k_bus_get_num_banks(ar) << 2438 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & 2439 HI_EARLY_ALLOC_IRAM_BANKS_MASK); 2440 2441 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); 2442 if (ret != 0) { 2443 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); 2444 return ret; 2445 } 2446 2447 /* Tell Target to proceed with initialization */ 2448 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); 2449 2450 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value); 2451 if (ret != 0) { 2452 ath10k_err(ar, "Failed to get option val: %d\n", ret); 2453 return ret; 2454 } 2455 2456 flag2_value |= HI_OPTION_EARLY_CFG_DONE; 2457 2458 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value); 2459 if (ret != 0) { 2460 ath10k_err(ar, "Failed to set option val: %d\n", ret); 2461 return ret; 2462 } 2463 2464 return 0; 2465 } 2466 2467 static void ath10k_pci_override_ce_config(struct ath10k *ar) 2468 { 2469 struct ce_attr *attr; 2470 struct ce_pipe_config *config; 2471 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2472 2473 /* For QCA6174 we're overriding the Copy Engine 5 configuration, 2474 * since it is currently used for another feature.
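 *
 * Assuming the default service map (not shown here), entry 15 of
 * serv_to_pipe is the target->host HTT data service, so the net effect of
 * the override below is roughly:
 *
 *	host CE5 attr:   src_sz_max = 0, dest_nentries = 0 (no rx buffers)
 *	target pipe 5:   PIPEDIR_OUT, nbytes_max = 2048
 *	HTT data (t->h): moved from CE5 to CE1, whose receive handler
 *	                 (ath10k_pci_htt_htc_rx_cb) takes both HTC and HTT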
2475 */ 2476 2477 /* Override Host's Copy Engine 5 configuration */ 2478 attr = &ar_pci->attr[5]; 2479 attr->src_sz_max = 0; 2480 attr->dest_nentries = 0; 2481 2482 /* Override Target firmware's Copy Engine configuration */ 2483 config = &ar_pci->pipe_config[5]; 2484 config->pipedir = __cpu_to_le32(PIPEDIR_OUT); 2485 config->nbytes_max = __cpu_to_le32(2048); 2486 2487 /* Map from service/endpoint to Copy Engine */ 2488 ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1); 2489 } 2490 2491 int ath10k_pci_alloc_pipes(struct ath10k *ar) 2492 { 2493 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2494 struct ath10k_pci_pipe *pipe; 2495 struct ath10k_ce *ce = ath10k_ce_priv(ar); 2496 int i, ret; 2497 2498 for (i = 0; i < CE_COUNT; i++) { 2499 pipe = &ar_pci->pipe_info[i]; 2500 pipe->ce_hdl = &ce->ce_states[i]; 2501 pipe->pipe_num = i; 2502 pipe->hif_ce_state = ar; 2503 2504 ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]); 2505 if (ret) { 2506 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", 2507 i, ret); 2508 return ret; 2509 } 2510 2511 /* Last CE is Diagnostic Window */ 2512 if (i == CE_DIAG_PIPE) { 2513 ar_pci->ce_diag = pipe->ce_hdl; 2514 continue; 2515 } 2516 2517 pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max); 2518 } 2519 2520 return 0; 2521 } 2522 2523 void ath10k_pci_free_pipes(struct ath10k *ar) 2524 { 2525 int i; 2526 2527 for (i = 0; i < CE_COUNT; i++) 2528 ath10k_ce_free_pipe(ar, i); 2529 } 2530 2531 int ath10k_pci_init_pipes(struct ath10k *ar) 2532 { 2533 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2534 int i, ret; 2535 2536 for (i = 0; i < CE_COUNT; i++) { 2537 ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]); 2538 if (ret) { 2539 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", 2540 i, ret); 2541 return ret; 2542 } 2543 } 2544 2545 return 0; 2546 } 2547 2548 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) 2549 { 2550 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & 2551 FW_IND_EVENT_PENDING; 2552 } 2553 2554 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) 2555 { 2556 u32 val; 2557 2558 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2559 val &= ~FW_IND_EVENT_PENDING; 2560 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); 2561 } 2562 2563 static bool ath10k_pci_has_device_gone(struct ath10k *ar) 2564 { 2565 u32 val; 2566 2567 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2568 return (val == 0xffffffff); 2569 } 2570 2571 /* this function effectively clears target memory controller assert line */ 2572 static void ath10k_pci_warm_reset_si0(struct ath10k *ar) 2573 { 2574 u32 val; 2575 2576 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2577 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2578 val | SOC_RESET_CONTROL_SI0_RST_MASK); 2579 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2580 2581 msleep(10); 2582 2583 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2584 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2585 val & ~SOC_RESET_CONTROL_SI0_RST_MASK); 2586 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2587 2588 msleep(10); 2589 } 2590 2591 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) 2592 { 2593 u32 val; 2594 2595 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); 2596 2597 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2598 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2599 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); 2600 } 2601 2602 static void ath10k_pci_warm_reset_ce(struct ath10k *ar) 2603 { 2604 u32 
val; 2605 2606 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2607 2608 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2609 val | SOC_RESET_CONTROL_CE_RST_MASK); 2610 msleep(10); 2611 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2612 val & ~SOC_RESET_CONTROL_CE_RST_MASK); 2613 } 2614 2615 static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) 2616 { 2617 u32 val; 2618 2619 val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS); 2620 ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS, 2621 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); 2622 } 2623 2624 static int ath10k_pci_warm_reset(struct ath10k *ar) 2625 { 2626 int ret; 2627 2628 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); 2629 2630 spin_lock_bh(&ar->data_lock); 2631 ar->stats.fw_warm_reset_counter++; 2632 spin_unlock_bh(&ar->data_lock); 2633 2634 ath10k_pci_irq_disable(ar); 2635 2636 /* Make sure the target CPU is not doing anything dangerous, e.g. if it 2637 * were to access the copy engine while the host performs a copy engine 2638 * reset then it is possible for the device to confuse the pci-e controller 2639 * to the point of bringing the host system to a complete stop (i.e. hang). 2640 */ 2641 ath10k_pci_warm_reset_si0(ar); 2642 ath10k_pci_warm_reset_cpu(ar); 2643 ath10k_pci_init_pipes(ar); 2644 ath10k_pci_wait_for_target_init(ar); 2645 2646 ath10k_pci_warm_reset_clear_lf(ar); 2647 ath10k_pci_warm_reset_ce(ar); 2648 ath10k_pci_warm_reset_cpu(ar); 2649 ath10k_pci_init_pipes(ar); 2650 2651 ret = ath10k_pci_wait_for_target_init(ar); 2652 if (ret) { 2653 ath10k_warn(ar, "failed to wait for target init: %d\n", ret); 2654 return ret; 2655 } 2656 2657 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); 2658 2659 return 0; 2660 } 2661 2662 static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar) 2663 { 2664 ath10k_pci_irq_disable(ar); 2665 return ath10k_pci_qca99x0_chip_reset(ar); 2666 } 2667 2668 static int ath10k_pci_safe_chip_reset(struct ath10k *ar) 2669 { 2670 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2671 2672 if (!ar_pci->pci_soft_reset) 2673 return -EOPNOTSUPP; 2674 2675 return ar_pci->pci_soft_reset(ar); 2676 } 2677 2678 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) 2679 { 2680 int i, ret; 2681 u32 val; 2682 2683 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n"); 2684 2685 /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset. 2686 * It is thus preferred to use warm reset which is safer but may not be 2687 * able to recover the device from all possible fail scenarios. 2688 * 2689 * Warm reset doesn't always work on first try so attempt it a few 2690 * times before giving up. 2691 */ 2692 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { 2693 ret = ath10k_pci_warm_reset(ar); 2694 if (ret) { 2695 ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n", 2696 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, 2697 ret); 2698 continue; 2699 } 2700 2701 /* FIXME: Sometimes copy engine doesn't recover after warm 2702 * reset. In most cases this needs cold reset. In some of these 2703 * cases the device is in such a state that a cold reset may 2704 * lock up the host. 2705 * 2706 * Reading any host interest register via copy engine is 2707 * sufficient to verify if device is capable of booting 2708 * firmware blob.
2709 */ 2710 ret = ath10k_pci_init_pipes(ar); 2711 if (ret) { 2712 ath10k_warn(ar, "failed to init copy engine: %d\n", 2713 ret); 2714 continue; 2715 } 2716 2717 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, 2718 &val); 2719 if (ret) { 2720 ath10k_warn(ar, "failed to poke copy engine: %d\n", 2721 ret); 2722 continue; 2723 } 2724 2725 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n"); 2726 return 0; 2727 } 2728 2729 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) { 2730 ath10k_warn(ar, "refusing cold reset as requested\n"); 2731 return -EPERM; 2732 } 2733 2734 ret = ath10k_pci_cold_reset(ar); 2735 if (ret) { 2736 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2737 return ret; 2738 } 2739 2740 ret = ath10k_pci_wait_for_target_init(ar); 2741 if (ret) { 2742 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2743 ret); 2744 return ret; 2745 } 2746 2747 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n"); 2748 2749 return 0; 2750 } 2751 2752 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) 2753 { 2754 int ret; 2755 2756 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n"); 2757 2758 /* FIXME: QCA6174 requires cold + warm reset to work. */ 2759 2760 ret = ath10k_pci_cold_reset(ar); 2761 if (ret) { 2762 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2763 return ret; 2764 } 2765 2766 ret = ath10k_pci_wait_for_target_init(ar); 2767 if (ret) { 2768 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2769 ret); 2770 return ret; 2771 } 2772 2773 ret = ath10k_pci_warm_reset(ar); 2774 if (ret) { 2775 ath10k_warn(ar, "failed to warm reset: %d\n", ret); 2776 return ret; 2777 } 2778 2779 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); 2780 2781 return 0; 2782 } 2783 2784 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) 2785 { 2786 int ret; 2787 2788 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n"); 2789 2790 ret = ath10k_pci_cold_reset(ar); 2791 if (ret) { 2792 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2793 return ret; 2794 } 2795 2796 ret = ath10k_pci_wait_for_target_init(ar); 2797 if (ret) { 2798 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2799 ret); 2800 return ret; 2801 } 2802 2803 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n"); 2804 2805 return 0; 2806 } 2807 2808 static int ath10k_pci_chip_reset(struct ath10k *ar) 2809 { 2810 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2811 2812 if (WARN_ON(!ar_pci->pci_hard_reset)) 2813 return -EOPNOTSUPP; 2814 2815 return ar_pci->pci_hard_reset(ar); 2816 } 2817 2818 static int ath10k_pci_hif_power_up(struct ath10k *ar, 2819 enum ath10k_firmware_mode fw_mode) 2820 { 2821 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2822 int ret; 2823 2824 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); 2825 2826 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2827 &ar_pci->link_ctl); 2828 pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2829 PCI_EXP_LNKCTL_ASPMC); 2830 2831 /* 2832 * Bring the target up cleanly. 2833 * 2834 * The target may be in an undefined state with an AUX-powered Target 2835 * and a Host in WoW mode. If the Host crashes, loses power, or is 2836 * restarted (without unloading the driver) then the Target is left 2837 * (aux) powered and running. On a subsequent driver load, the Target 2838 * is in an unexpected state. 
We try to catch that here in order to 2839 * reset the Target and retry the probe. 2840 */ 2841 ret = ath10k_pci_chip_reset(ar); 2842 if (ret) { 2843 if (ath10k_pci_has_fw_crashed(ar)) { 2844 ath10k_warn(ar, "firmware crashed during chip reset\n"); 2845 ath10k_pci_fw_crashed_clear(ar); 2846 ath10k_pci_fw_crashed_dump(ar); 2847 } 2848 2849 ath10k_err(ar, "failed to reset chip: %d\n", ret); 2850 goto err_sleep; 2851 } 2852 2853 ret = ath10k_pci_init_pipes(ar); 2854 if (ret) { 2855 ath10k_err(ar, "failed to initialize CE: %d\n", ret); 2856 goto err_sleep; 2857 } 2858 2859 ret = ath10k_pci_init_config(ar); 2860 if (ret) { 2861 ath10k_err(ar, "failed to setup init config: %d\n", ret); 2862 goto err_ce; 2863 } 2864 2865 ret = ath10k_pci_wake_target_cpu(ar); 2866 if (ret) { 2867 ath10k_err(ar, "could not wake up target CPU: %d\n", ret); 2868 goto err_ce; 2869 } 2870 2871 return 0; 2872 2873 err_ce: 2874 ath10k_pci_ce_deinit(ar); 2875 2876 err_sleep: 2877 return ret; 2878 } 2879 2880 void ath10k_pci_hif_power_down(struct ath10k *ar) 2881 { 2882 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); 2883 2884 /* Currently hif_power_up performs effectively a reset and hif_stop 2885 * resets the chip as well so there's no point in resetting here. 2886 */ 2887 } 2888 2889 static int ath10k_pci_hif_suspend(struct ath10k *ar) 2890 { 2891 /* Nothing to do; the important stuff is in the driver suspend. */ 2892 return 0; 2893 } 2894 2895 static int ath10k_pci_suspend(struct ath10k *ar) 2896 { 2897 /* The grace timer can still be counting down and ar->ps_awake be true. 2898 * It is known that the device may be asleep after resuming regardless 2899 * of the SoC powersave state before suspending. Hence make sure the 2900 * device is asleep before proceeding. 2901 */ 2902 ath10k_pci_sleep_sync(ar); 2903 2904 return 0; 2905 } 2906 2907 static int ath10k_pci_hif_resume(struct ath10k *ar) 2908 { 2909 /* Nothing to do; the important stuff is in the driver resume. */ 2910 return 0; 2911 } 2912 2913 static int ath10k_pci_resume(struct ath10k *ar) 2914 { 2915 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2916 struct pci_dev *pdev = ar_pci->pdev; 2917 u32 val; 2918 int ret = 0; 2919 2920 ret = ath10k_pci_force_wake(ar); 2921 if (ret) { 2922 ath10k_err(ar, "failed to wake up target: %d\n", ret); 2923 return ret; 2924 } 2925 2926 /* Suspend/Resume resets the PCI configuration space, so we have to 2927 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries 2928 * from interfering with C3 CPU state. pci_restore_state won't help 2929 * here since it only restores the first 64 bytes pci config header. 
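 *
 * For reference: offset 0x41 is byte 1 of the dword read at 0x40 below,
 * which is why the check and fix-up work on the 0x0000ff00 field of that
 * dword.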
2930 */ 2931 pci_read_config_dword(pdev, 0x40, &val); 2932 if ((val & 0x0000ff00) != 0) 2933 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 2934 2935 return ret; 2936 } 2937 2938 static bool ath10k_pci_validate_cal(void *data, size_t size) 2939 { 2940 __le16 *cal_words = data; 2941 u16 checksum = 0; 2942 size_t i; 2943 2944 if (size % 2 != 0) 2945 return false; 2946 2947 for (i = 0; i < size / 2; i++) 2948 checksum ^= le16_to_cpu(cal_words[i]); 2949 2950 return checksum == 0xffff; 2951 } 2952 2953 static void ath10k_pci_enable_eeprom(struct ath10k *ar) 2954 { 2955 /* Enable SI clock */ 2956 ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0); 2957 2958 /* Configure GPIOs for I2C operation */ 2959 ath10k_pci_write32(ar, 2960 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + 2961 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN, 2962 SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG, 2963 GPIO_PIN0_CONFIG) | 2964 SM(1, GPIO_PIN0_PAD_PULL)); 2965 2966 ath10k_pci_write32(ar, 2967 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + 2968 4 * QCA9887_1_0_SI_CLK_GPIO_PIN, 2969 SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) | 2970 SM(1, GPIO_PIN0_PAD_PULL)); 2971 2972 ath10k_pci_write32(ar, 2973 GPIO_BASE_ADDRESS + 2974 QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS, 2975 1u << QCA9887_1_0_SI_CLK_GPIO_PIN); 2976 2977 /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */ 2978 ath10k_pci_write32(ar, 2979 SI_BASE_ADDRESS + SI_CONFIG_OFFSET, 2980 SM(1, SI_CONFIG_ERR_INT) | 2981 SM(1, SI_CONFIG_BIDIR_OD_DATA) | 2982 SM(1, SI_CONFIG_I2C) | 2983 SM(1, SI_CONFIG_POS_SAMPLE) | 2984 SM(1, SI_CONFIG_INACTIVE_DATA) | 2985 SM(1, SI_CONFIG_INACTIVE_CLK) | 2986 SM(8, SI_CONFIG_DIVIDER)); 2987 } 2988 2989 static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out) 2990 { 2991 u32 reg; 2992 int wait_limit; 2993 2994 /* set device select byte and for the read operation */ 2995 reg = QCA9887_EEPROM_SELECT_READ | 2996 SM(addr, QCA9887_EEPROM_ADDR_LO) | 2997 SM(addr >> 8, QCA9887_EEPROM_ADDR_HI); 2998 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg); 2999 3000 /* write transmit data, transfer length, and START bit */ 3001 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, 3002 SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) | 3003 SM(4, SI_CS_TX_CNT)); 3004 3005 /* wait max 1 sec */ 3006 wait_limit = 100000; 3007 3008 /* wait for SI_CS_DONE_INT */ 3009 do { 3010 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET); 3011 if (MS(reg, SI_CS_DONE_INT)) 3012 break; 3013 3014 wait_limit--; 3015 udelay(10); 3016 } while (wait_limit > 0); 3017 3018 if (!MS(reg, SI_CS_DONE_INT)) { 3019 ath10k_err(ar, "timeout while reading device EEPROM at %04x\n", 3020 addr); 3021 return -ETIMEDOUT; 3022 } 3023 3024 /* clear SI_CS_DONE_INT */ 3025 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg); 3026 3027 if (MS(reg, SI_CS_DONE_ERR)) { 3028 ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr); 3029 return -EIO; 3030 } 3031 3032 /* extract receive data */ 3033 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET); 3034 *out = reg; 3035 3036 return 0; 3037 } 3038 3039 static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, 3040 size_t *data_len) 3041 { 3042 u8 *caldata = NULL; 3043 size_t calsize, i; 3044 int ret; 3045 3046 if (!QCA_REV_9887(ar)) 3047 return -EOPNOTSUPP; 3048 3049 calsize = ar->hw_params.cal_data_len; 3050 caldata = kmalloc(calsize, GFP_KERNEL); 3051 if (!caldata) 3052 return -ENOMEM; 3053 3054 ath10k_pci_enable_eeprom(ar); 3055 3056 for (i = 0; i < calsize; i++) { 3057 ret = 
ath10k_pci_read_eeprom(ar, i, &caldata[i]); 3058 if (ret) 3059 goto err_free; 3060 } 3061 3062 if (!ath10k_pci_validate_cal(caldata, calsize)) 3063 goto err_free; 3064 3065 *data = caldata; 3066 *data_len = calsize; 3067 3068 return 0; 3069 3070 err_free: 3071 kfree(caldata); 3072 3073 return -EINVAL; 3074 } 3075 3076 static const struct ath10k_hif_ops ath10k_pci_hif_ops = { 3077 .tx_sg = ath10k_pci_hif_tx_sg, 3078 .diag_read = ath10k_pci_hif_diag_read, 3079 .diag_write = ath10k_pci_diag_write_mem, 3080 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, 3081 .start = ath10k_pci_hif_start, 3082 .stop = ath10k_pci_hif_stop, 3083 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, 3084 .get_default_pipe = ath10k_pci_hif_get_default_pipe, 3085 .send_complete_check = ath10k_pci_hif_send_complete_check, 3086 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, 3087 .power_up = ath10k_pci_hif_power_up, 3088 .power_down = ath10k_pci_hif_power_down, 3089 .read32 = ath10k_pci_read32, 3090 .write32 = ath10k_pci_write32, 3091 .suspend = ath10k_pci_hif_suspend, 3092 .resume = ath10k_pci_hif_resume, 3093 .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, 3094 }; 3095 3096 /* 3097 * Top-level interrupt handler for all PCI interrupts from a Target. 3098 * When a block of MSI interrupts is allocated, this top-level handler 3099 * is not used; instead, we directly call the correct sub-handler. 3100 */ 3101 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) 3102 { 3103 struct ath10k *ar = arg; 3104 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3105 int ret; 3106 3107 if (ath10k_pci_has_device_gone(ar)) 3108 return IRQ_NONE; 3109 3110 ret = ath10k_pci_force_wake(ar); 3111 if (ret) { 3112 ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret); 3113 return IRQ_NONE; 3114 } 3115 3116 if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) && 3117 !ath10k_pci_irq_pending(ar)) 3118 return IRQ_NONE; 3119 3120 ath10k_pci_disable_and_clear_intx_irq(ar); 3121 ath10k_pci_irq_msi_fw_mask(ar); 3122 napi_schedule(&ar->napi); 3123 3124 return IRQ_HANDLED; 3125 } 3126 3127 static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) 3128 { 3129 struct ath10k *ar = container_of(ctx, struct ath10k, napi); 3130 int done = 0; 3131 3132 if (ath10k_pci_has_fw_crashed(ar)) { 3133 ath10k_pci_fw_crashed_clear(ar); 3134 ath10k_pci_fw_crashed_dump(ar); 3135 napi_complete(ctx); 3136 return done; 3137 } 3138 3139 ath10k_ce_per_engine_service_any(ar); 3140 3141 done = ath10k_htt_txrx_compl_task(ar, budget); 3142 3143 if (done < budget) { 3144 napi_complete_done(ctx, done); 3145 /* In case of MSI, it is possible that interrupts are received 3146 * while NAPI poll is in progress. So pending interrupts that are 3147 * received after processing all copy engine pipes by NAPI poll 3148 * will not be handled again. This is causing failure to 3149 * complete the boot sequence on x86 platforms. So before enabling 3150 * interrupts it is safer to check for pending interrupts and 3151 * service them immediately.
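 *
 * Hence the order below: complete NAPI first, then re-check the copy
 * engine interrupt summary, and only re-enable host interrupts when
 * nothing is pending; otherwise the poll is rescheduled instead.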
3152 */ 3153 if (ath10k_ce_interrupt_summary(ar)) { 3154 napi_schedule(ctx); 3155 goto out; 3156 } 3157 ath10k_pci_enable_intx_irq(ar); 3158 ath10k_pci_irq_msi_fw_unmask(ar); 3159 } 3160 3161 out: 3162 return done; 3163 } 3164 3165 static int ath10k_pci_request_irq_msi(struct ath10k *ar) 3166 { 3167 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3168 int ret; 3169 3170 ret = request_irq(ar_pci->pdev->irq, 3171 ath10k_pci_interrupt_handler, 3172 IRQF_SHARED, "ath10k_pci", ar); 3173 if (ret) { 3174 ath10k_warn(ar, "failed to request MSI irq %d: %d\n", 3175 ar_pci->pdev->irq, ret); 3176 return ret; 3177 } 3178 3179 return 0; 3180 } 3181 3182 static int ath10k_pci_request_irq_intx(struct ath10k *ar) 3183 { 3184 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3185 int ret; 3186 3187 ret = request_irq(ar_pci->pdev->irq, 3188 ath10k_pci_interrupt_handler, 3189 IRQF_SHARED, "ath10k_pci", ar); 3190 if (ret) { 3191 ath10k_warn(ar, "failed to request legacy irq %d: %d\n", 3192 ar_pci->pdev->irq, ret); 3193 return ret; 3194 } 3195 3196 return 0; 3197 } 3198 3199 static int ath10k_pci_request_irq(struct ath10k *ar) 3200 { 3201 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3202 3203 switch (ar_pci->oper_irq_mode) { 3204 case ATH10K_PCI_IRQ_INTX: 3205 return ath10k_pci_request_irq_intx(ar); 3206 case ATH10K_PCI_IRQ_MSI: 3207 return ath10k_pci_request_irq_msi(ar); 3208 default: 3209 return -EINVAL; 3210 } 3211 } 3212 3213 static void ath10k_pci_free_irq(struct ath10k *ar) 3214 { 3215 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3216 3217 free_irq(ar_pci->pdev->irq, ar); 3218 } 3219 3220 void ath10k_pci_init_napi(struct ath10k *ar) 3221 { 3222 netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll); 3223 } 3224 3225 static int ath10k_pci_init_irq(struct ath10k *ar) 3226 { 3227 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3228 int ret; 3229 3230 ath10k_pci_init_napi(ar); 3231 3232 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) 3233 ath10k_info(ar, "limiting irq mode to: %d\n", 3234 ath10k_pci_irq_mode); 3235 3236 /* Try MSI */ 3237 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_INTX) { 3238 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI; 3239 ret = pci_enable_msi(ar_pci->pdev); 3240 if (ret == 0) 3241 return 0; 3242 3243 /* MSI failed, try legacy irq next */ 3244 } 3245 3246 /* Try legacy irq 3247 * 3248 * A potential race occurs here: The CORE_BASE write 3249 * depends on target correctly decoding AXI address but 3250 * host won't know when target writes BAR to CORE_CTRL. 3251 * This write might get lost if target has NOT written BAR. 3252 * For now, fix the race by repeating the write in below 3253 * synchronization checking.
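 *
 * The "synchronization checking" referred to above is
 * ath10k_pci_wait_for_target_init(), which repeats this enable write via
 * ath10k_pci_enable_intx_irq() on every polling iteration, so a write
 * lost to the race gets retried there.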
3254 */ 3255 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX; 3256 3257 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 3258 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 3259 3260 return 0; 3261 } 3262 3263 static void ath10k_pci_deinit_irq_intx(struct ath10k *ar) 3264 { 3265 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 3266 0); 3267 } 3268 3269 static int ath10k_pci_deinit_irq(struct ath10k *ar) 3270 { 3271 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3272 3273 switch (ar_pci->oper_irq_mode) { 3274 case ATH10K_PCI_IRQ_INTX: 3275 ath10k_pci_deinit_irq_intx(ar); 3276 break; 3277 default: 3278 pci_disable_msi(ar_pci->pdev); 3279 break; 3280 } 3281 3282 return 0; 3283 } 3284 3285 int ath10k_pci_wait_for_target_init(struct ath10k *ar) 3286 { 3287 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3288 unsigned long timeout; 3289 u32 val; 3290 3291 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); 3292 3293 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); 3294 3295 do { 3296 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 3297 3298 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", 3299 val); 3300 3301 /* target should never return this */ 3302 if (val == 0xffffffff) 3303 continue; 3304 3305 /* the device has crashed so don't bother trying anymore */ 3306 if (val & FW_IND_EVENT_PENDING) 3307 break; 3308 3309 if (val & FW_IND_INITIALIZED) 3310 break; 3311 3312 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) 3313 /* Fix potential race by repeating CORE_BASE writes */ 3314 ath10k_pci_enable_intx_irq(ar); 3315 3316 mdelay(10); 3317 } while (time_before(jiffies, timeout)); 3318 3319 ath10k_pci_disable_and_clear_intx_irq(ar); 3320 ath10k_pci_irq_msi_fw_mask(ar); 3321 3322 if (val == 0xffffffff) { 3323 ath10k_err(ar, "failed to read device register, device is gone\n"); 3324 return -EIO; 3325 } 3326 3327 if (val & FW_IND_EVENT_PENDING) { 3328 ath10k_warn(ar, "device has crashed during init\n"); 3329 return -ECOMM; 3330 } 3331 3332 if (!(val & FW_IND_INITIALIZED)) { 3333 ath10k_err(ar, "failed to receive initialized event from target: %08x\n", 3334 val); 3335 return -ETIMEDOUT; 3336 } 3337 3338 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); 3339 return 0; 3340 } 3341 3342 static int ath10k_pci_cold_reset(struct ath10k *ar) 3343 { 3344 u32 val; 3345 3346 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); 3347 3348 spin_lock_bh(&ar->data_lock); 3349 3350 ar->stats.fw_cold_reset_counter++; 3351 3352 spin_unlock_bh(&ar->data_lock); 3353 3354 /* Put Target, including PCIe, into RESET. */ 3355 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); 3356 val |= 1; 3357 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 3358 3359 /* After writing into SOC_GLOBAL_RESET to put device into 3360 * reset and pulling out of reset pcie may not be stable 3361 * for any immediate pcie register access and cause bus error, 3362 * add delay before any pcie access request to fix this issue. 3363 */ 3364 msleep(20); 3365 3366 /* Pull Target, including PCIe, out of RESET. 
*/ 3367 val &= ~1; 3368 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 3369 3370 msleep(20); 3371 3372 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); 3373 3374 return 0; 3375 } 3376 3377 static int ath10k_pci_claim(struct ath10k *ar) 3378 { 3379 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3380 struct pci_dev *pdev = ar_pci->pdev; 3381 int ret; 3382 3383 pci_set_drvdata(pdev, ar); 3384 3385 ret = pci_enable_device(pdev); 3386 if (ret) { 3387 ath10k_err(ar, "failed to enable pci device: %d\n", ret); 3388 return ret; 3389 } 3390 3391 ret = pci_request_region(pdev, BAR_NUM, "ath"); 3392 if (ret) { 3393 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, 3394 ret); 3395 goto err_device; 3396 } 3397 3398 /* Target expects 32 bit DMA. Enforce it. */ 3399 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3400 if (ret) { 3401 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); 3402 goto err_region; 3403 } 3404 3405 pci_set_master(pdev); 3406 3407 /* Arrange for access to Target SoC registers. */ 3408 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); 3409 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); 3410 if (!ar_pci->mem) { 3411 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); 3412 ret = -EIO; 3413 goto err_region; 3414 } 3415 3416 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); 3417 return 0; 3418 3419 err_region: 3420 pci_release_region(pdev, BAR_NUM); 3421 3422 err_device: 3423 pci_disable_device(pdev); 3424 3425 return ret; 3426 } 3427 3428 static void ath10k_pci_release(struct ath10k *ar) 3429 { 3430 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3431 struct pci_dev *pdev = ar_pci->pdev; 3432 3433 pci_iounmap(pdev, ar_pci->mem); 3434 pci_release_region(pdev, BAR_NUM); 3435 pci_disable_device(pdev); 3436 } 3437 3438 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) 3439 { 3440 const struct ath10k_pci_supp_chip *supp_chip; 3441 int i; 3442 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); 3443 3444 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { 3445 supp_chip = &ath10k_pci_supp_chips[i]; 3446 3447 if (supp_chip->dev_id == dev_id && 3448 supp_chip->rev_id == rev_id) 3449 return true; 3450 } 3451 3452 return false; 3453 } 3454 3455 int ath10k_pci_setup_resource(struct ath10k *ar) 3456 { 3457 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3458 struct ath10k_ce *ce = ath10k_ce_priv(ar); 3459 int ret; 3460 3461 spin_lock_init(&ce->ce_lock); 3462 spin_lock_init(&ar_pci->ps_lock); 3463 mutex_init(&ar_pci->ce_diag_mutex); 3464 3465 INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work); 3466 3467 timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); 3468 3469 ar_pci->attr = kmemdup(pci_host_ce_config_wlan, 3470 sizeof(pci_host_ce_config_wlan), 3471 GFP_KERNEL); 3472 if (!ar_pci->attr) 3473 return -ENOMEM; 3474 3475 ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, 3476 sizeof(pci_target_ce_config_wlan), 3477 GFP_KERNEL); 3478 if (!ar_pci->pipe_config) { 3479 ret = -ENOMEM; 3480 goto err_free_attr; 3481 } 3482 3483 ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, 3484 sizeof(pci_target_service_to_ce_map_wlan), 3485 GFP_KERNEL); 3486 if (!ar_pci->serv_to_pipe) { 3487 ret = -ENOMEM; 3488 goto err_free_pipe_config; 3489 } 3490 3491 if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) 3492 ath10k_pci_override_ce_config(ar); 3493 3494 ret = ath10k_pci_alloc_pipes(ar); 3495 if (ret) { 3496 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3497 ret); 3498 
goto err_free_serv_to_pipe; 3499 } 3500 3501 return 0; 3502 3503 err_free_serv_to_pipe: 3504 kfree(ar_pci->serv_to_pipe); 3505 err_free_pipe_config: 3506 kfree(ar_pci->pipe_config); 3507 err_free_attr: 3508 kfree(ar_pci->attr); 3509 return ret; 3510 } 3511 3512 void ath10k_pci_release_resource(struct ath10k *ar) 3513 { 3514 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3515 3516 ath10k_pci_rx_retry_sync(ar); 3517 netif_napi_del(&ar->napi); 3518 ath10k_pci_ce_deinit(ar); 3519 ath10k_pci_free_pipes(ar); 3520 kfree(ar_pci->attr); 3521 kfree(ar_pci->pipe_config); 3522 kfree(ar_pci->serv_to_pipe); 3523 } 3524 3525 static const struct ath10k_bus_ops ath10k_pci_bus_ops = { 3526 .read32 = ath10k_bus_pci_read32, 3527 .write32 = ath10k_bus_pci_write32, 3528 .get_num_banks = ath10k_pci_get_num_banks, 3529 }; 3530 3531 static int ath10k_pci_probe(struct pci_dev *pdev, 3532 const struct pci_device_id *pci_dev) 3533 { 3534 int ret = 0; 3535 struct ath10k *ar; 3536 struct ath10k_pci *ar_pci; 3537 enum ath10k_hw_rev hw_rev; 3538 struct ath10k_bus_params bus_params = {}; 3539 bool pci_ps, is_qca988x = false; 3540 int (*pci_soft_reset)(struct ath10k *ar); 3541 int (*pci_hard_reset)(struct ath10k *ar); 3542 u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); 3543 3544 switch (pci_dev->device) { 3545 case QCA988X_2_0_DEVICE_ID_UBNT: 3546 case QCA988X_2_0_DEVICE_ID: 3547 hw_rev = ATH10K_HW_QCA988X; 3548 pci_ps = false; 3549 is_qca988x = true; 3550 pci_soft_reset = ath10k_pci_warm_reset; 3551 pci_hard_reset = ath10k_pci_qca988x_chip_reset; 3552 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; 3553 break; 3554 case QCA9887_1_0_DEVICE_ID: 3555 hw_rev = ATH10K_HW_QCA9887; 3556 pci_ps = false; 3557 pci_soft_reset = ath10k_pci_warm_reset; 3558 pci_hard_reset = ath10k_pci_qca988x_chip_reset; 3559 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; 3560 break; 3561 case QCA6164_2_1_DEVICE_ID: 3562 case QCA6174_2_1_DEVICE_ID: 3563 hw_rev = ATH10K_HW_QCA6174; 3564 pci_ps = true; 3565 pci_soft_reset = ath10k_pci_warm_reset; 3566 pci_hard_reset = ath10k_pci_qca6174_chip_reset; 3567 targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; 3568 break; 3569 case QCA99X0_2_0_DEVICE_ID: 3570 hw_rev = ATH10K_HW_QCA99X0; 3571 pci_ps = false; 3572 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; 3573 pci_hard_reset = ath10k_pci_qca99x0_chip_reset; 3574 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; 3575 break; 3576 case QCA9984_1_0_DEVICE_ID: 3577 hw_rev = ATH10K_HW_QCA9984; 3578 pci_ps = false; 3579 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; 3580 pci_hard_reset = ath10k_pci_qca99x0_chip_reset; 3581 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; 3582 break; 3583 case QCA9888_2_0_DEVICE_ID: 3584 hw_rev = ATH10K_HW_QCA9888; 3585 pci_ps = false; 3586 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; 3587 pci_hard_reset = ath10k_pci_qca99x0_chip_reset; 3588 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; 3589 break; 3590 case QCA9377_1_0_DEVICE_ID: 3591 hw_rev = ATH10K_HW_QCA9377; 3592 pci_ps = true; 3593 pci_soft_reset = ath10k_pci_warm_reset; 3594 pci_hard_reset = ath10k_pci_qca6174_chip_reset; 3595 targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; 3596 break; 3597 default: 3598 WARN_ON(1); 3599 return -EOPNOTSUPP; 3600 } 3601 3602 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, 3603 hw_rev, &ath10k_pci_hif_ops); 3604 if (!ar) { 3605 dev_err(&pdev->dev, "failed to allocate core\n"); 3606 return -ENOMEM; 
3607 } 3608 3609 ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", 3610 pdev->vendor, pdev->device, 3611 pdev->subsystem_vendor, pdev->subsystem_device); 3612 3613 ar_pci = ath10k_pci_priv(ar); 3614 ar_pci->pdev = pdev; 3615 ar_pci->dev = &pdev->dev; 3616 ar_pci->ar = ar; 3617 ar->dev_id = pci_dev->device; 3618 ar_pci->pci_ps = pci_ps; 3619 ar_pci->ce.bus_ops = &ath10k_pci_bus_ops; 3620 ar_pci->pci_soft_reset = pci_soft_reset; 3621 ar_pci->pci_hard_reset = pci_hard_reset; 3622 ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; 3623 ar->ce_priv = &ar_pci->ce; 3624 3625 ar->id.vendor = pdev->vendor; 3626 ar->id.device = pdev->device; 3627 ar->id.subsystem_vendor = pdev->subsystem_vendor; 3628 ar->id.subsystem_device = pdev->subsystem_device; 3629 3630 timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); 3631 3632 ret = ath10k_pci_setup_resource(ar); 3633 if (ret) { 3634 ath10k_err(ar, "failed to setup resource: %d\n", ret); 3635 goto err_core_destroy; 3636 } 3637 3638 ret = ath10k_pci_claim(ar); 3639 if (ret) { 3640 ath10k_err(ar, "failed to claim device: %d\n", ret); 3641 goto err_free_pipes; 3642 } 3643 3644 ret = ath10k_pci_force_wake(ar); 3645 if (ret) { 3646 ath10k_warn(ar, "failed to wake up device : %d\n", ret); 3647 goto err_sleep; 3648 } 3649 3650 ath10k_pci_ce_deinit(ar); 3651 ath10k_pci_irq_disable(ar); 3652 3653 ret = ath10k_pci_init_irq(ar); 3654 if (ret) { 3655 ath10k_err(ar, "failed to init irqs: %d\n", ret); 3656 goto err_sleep; 3657 } 3658 3659 ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n", 3660 ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode, 3661 ath10k_pci_irq_mode, ath10k_pci_reset_mode); 3662 3663 ret = ath10k_pci_request_irq(ar); 3664 if (ret) { 3665 ath10k_warn(ar, "failed to request irqs: %d\n", ret); 3666 goto err_deinit_irq; 3667 } 3668 3669 bus_params.dev_type = ATH10K_DEV_TYPE_LL; 3670 bus_params.link_can_suspend = true; 3671 /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that 3672 * fall off the bus during chip_reset. These chips have the same pci 3673 * device id as the QCA9880 BR4A or 2R4E. So that's why the check. 
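 *
 * Note that a pre-reset chip_id of 0xffffffff is not treated as fatal
 * here; only a readable chip_id that fails ath10k_pci_chip_is_supported()
 * aborts the probe early. The chip_id is read and checked again below,
 * after ath10k_pci_chip_reset().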
3674 */ 3675 if (is_qca988x) { 3676 bus_params.chip_id = 3677 ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); 3678 if (bus_params.chip_id != 0xffffffff) { 3679 if (!ath10k_pci_chip_is_supported(pdev->device, 3680 bus_params.chip_id)) { 3681 ret = -ENODEV; 3682 goto err_unsupported; 3683 } 3684 } 3685 } 3686 3687 ret = ath10k_pci_chip_reset(ar); 3688 if (ret) { 3689 ath10k_err(ar, "failed to reset chip: %d\n", ret); 3690 goto err_free_irq; 3691 } 3692 3693 bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); 3694 if (bus_params.chip_id == 0xffffffff) { 3695 ret = -ENODEV; 3696 goto err_unsupported; 3697 } 3698 3699 if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) { 3700 ret = -ENODEV; 3701 goto err_unsupported; 3702 } 3703 3704 ret = ath10k_core_register(ar, &bus_params); 3705 if (ret) { 3706 ath10k_err(ar, "failed to register driver core: %d\n", ret); 3707 goto err_free_irq; 3708 } 3709 3710 return 0; 3711 3712 err_unsupported: 3713 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", 3714 pdev->device, bus_params.chip_id); 3715 3716 err_free_irq: 3717 ath10k_pci_free_irq(ar); 3718 3719 err_deinit_irq: 3720 ath10k_pci_release_resource(ar); 3721 3722 err_sleep: 3723 ath10k_pci_sleep_sync(ar); 3724 ath10k_pci_release(ar); 3725 3726 err_free_pipes: 3727 ath10k_pci_free_pipes(ar); 3728 3729 err_core_destroy: 3730 ath10k_core_destroy(ar); 3731 3732 return ret; 3733 } 3734 3735 static void ath10k_pci_remove(struct pci_dev *pdev) 3736 { 3737 struct ath10k *ar = pci_get_drvdata(pdev); 3738 3739 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); 3740 3741 if (!ar) 3742 return; 3743 3744 ath10k_core_unregister(ar); 3745 ath10k_pci_free_irq(ar); 3746 ath10k_pci_deinit_irq(ar); 3747 ath10k_pci_release_resource(ar); 3748 ath10k_pci_sleep_sync(ar); 3749 ath10k_pci_release(ar); 3750 ath10k_core_destroy(ar); 3751 } 3752 3753 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); 3754 3755 static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) 3756 { 3757 struct ath10k *ar = dev_get_drvdata(dev); 3758 int ret; 3759 3760 ret = ath10k_pci_suspend(ar); 3761 if (ret) 3762 ath10k_warn(ar, "failed to suspend hif: %d\n", ret); 3763 3764 return ret; 3765 } 3766 3767 static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) 3768 { 3769 struct ath10k *ar = dev_get_drvdata(dev); 3770 int ret; 3771 3772 ret = ath10k_pci_resume(ar); 3773 if (ret) 3774 ath10k_warn(ar, "failed to resume hif: %d\n", ret); 3775 3776 return ret; 3777 } 3778 3779 static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, 3780 ath10k_pci_pm_suspend, 3781 ath10k_pci_pm_resume); 3782 3783 static struct pci_driver ath10k_pci_driver = { 3784 .name = "ath10k_pci", 3785 .id_table = ath10k_pci_id_table, 3786 .probe = ath10k_pci_probe, 3787 .remove = ath10k_pci_remove, 3788 #ifdef CONFIG_PM 3789 .driver.pm = &ath10k_pci_pm_ops, 3790 #endif 3791 }; 3792 3793 static int __init ath10k_pci_init(void) 3794 { 3795 int ret1, ret2; 3796 3797 ret1 = pci_register_driver(&ath10k_pci_driver); 3798 if (ret1) 3799 printk(KERN_ERR "failed to register ath10k pci driver: %d\n", 3800 ret1); 3801 3802 ret2 = ath10k_ahb_init(); 3803 if (ret2) 3804 printk(KERN_ERR "ahb init failed: %d\n", ret2); 3805 3806 if (ret1 && ret2) 3807 return ret1; 3808 3809 /* registered to at least one bus */ 3810 return 0; 3811 } 3812 module_init(ath10k_pci_init); 3813 3814 static void __exit ath10k_pci_exit(void) 3815 { 3816 pci_unregister_driver(&ath10k_pci_driver); 3817 ath10k_ahb_exit(); 3818 } 3819 3820 module_exit(ath10k_pci_exit); 3821 3822 
MODULE_AUTHOR("Qualcomm Atheros"); 3823 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices"); 3824 MODULE_LICENSE("Dual BSD/GPL"); 3825 3826 /* QCA988x 2.0 firmware files */ 3827 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); 3828 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); 3829 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3830 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3831 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); 3832 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3833 3834 /* QCA9887 1.0 firmware files */ 3835 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3836 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); 3837 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3838 3839 /* QCA6174 2.1 firmware files */ 3840 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); 3841 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); 3842 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_DATA_FILE); 3843 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3844 3845 /* QCA6174 3.1 firmware files */ 3846 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3847 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3848 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE); 3849 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); 3850 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3851 3852 /* QCA9377 1.0 firmware files */ 3853 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE); 3854 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3855 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); 3856