/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
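	 *
	 * Each entry below pairs a PCI device ID with a chip revision ID the
	 * driver is known to work with; the probe path is expected to compare
	 * the chip_id read from the device against this table and reject
	 * anything else.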
73 */ 74 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, 75 76 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 77 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 78 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 79 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 80 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 81 82 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 83 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 84 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 85 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 87 88 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 89 90 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 91 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, 92 }; 93 94 static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 95 static int ath10k_pci_cold_reset(struct ath10k *ar); 96 static int ath10k_pci_safe_chip_reset(struct ath10k *ar); 97 static int ath10k_pci_wait_for_target_init(struct ath10k *ar); 98 static int ath10k_pci_init_irq(struct ath10k *ar); 99 static int ath10k_pci_deinit_irq(struct ath10k *ar); 100 static int ath10k_pci_request_irq(struct ath10k *ar); 101 static void ath10k_pci_free_irq(struct ath10k *ar); 102 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 103 struct ath10k_ce_pipe *rx_pipe, 104 struct bmi_xfer *xfer); 105 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); 106 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); 107 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 108 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); 109 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); 110 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 111 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); 112 113 static struct ce_attr host_ce_config_wlan[] = { 114 /* CE0: host->target HTC control and raw streams */ 115 { 116 .flags = CE_ATTR_FLAGS, 117 .src_nentries = 16, 118 .src_sz_max = 256, 119 .dest_nentries = 0, 120 .send_cb = ath10k_pci_htc_tx_cb, 121 }, 122 123 /* CE1: target->host HTT + HTC control */ 124 { 125 .flags = CE_ATTR_FLAGS, 126 .src_nentries = 0, 127 .src_sz_max = 2048, 128 .dest_nentries = 512, 129 .recv_cb = ath10k_pci_htt_htc_rx_cb, 130 }, 131 132 /* CE2: target->host WMI */ 133 { 134 .flags = CE_ATTR_FLAGS, 135 .src_nentries = 0, 136 .src_sz_max = 2048, 137 .dest_nentries = 128, 138 .recv_cb = ath10k_pci_htc_rx_cb, 139 }, 140 141 /* CE3: host->target WMI */ 142 { 143 .flags = CE_ATTR_FLAGS, 144 .src_nentries = 32, 145 .src_sz_max = 2048, 146 .dest_nentries = 0, 147 .send_cb = ath10k_pci_htc_tx_cb, 148 }, 149 150 /* CE4: host->target HTT */ 151 { 152 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 153 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, 154 .src_sz_max = 256, 155 .dest_nentries = 0, 156 .send_cb = ath10k_pci_htt_tx_cb, 157 }, 158 159 /* CE5: target->host HTT (HIF->HTT) */ 160 { 161 .flags = CE_ATTR_FLAGS, 162 .src_nentries = 0, 163 .src_sz_max = 512, 164 .dest_nentries = 512, 165 .recv_cb = ath10k_pci_htt_rx_cb, 166 }, 167 168 /* CE6: target autonomous hif_memcpy */ 169 { 170 .flags = CE_ATTR_FLAGS, 171 .src_nentries = 0, 172 .src_sz_max = 0, 173 .dest_nentries = 0, 174 }, 175 176 /* CE7: ce_diag, the Diagnostic Window */ 177 { 178 .flags = CE_ATTR_FLAGS, 179 .src_nentries = 2, 180 .src_sz_max = DIAG_TRANSFER_LIMIT, 181 .dest_nentries = 2, 182 }, 183 184 /* CE8: 
target->host pktlog */ 185 { 186 .flags = CE_ATTR_FLAGS, 187 .src_nentries = 0, 188 .src_sz_max = 2048, 189 .dest_nentries = 128, 190 .recv_cb = ath10k_pci_pktlog_rx_cb, 191 }, 192 193 /* CE9 target autonomous qcache memcpy */ 194 { 195 .flags = CE_ATTR_FLAGS, 196 .src_nentries = 0, 197 .src_sz_max = 0, 198 .dest_nentries = 0, 199 }, 200 201 /* CE10: target autonomous hif memcpy */ 202 { 203 .flags = CE_ATTR_FLAGS, 204 .src_nentries = 0, 205 .src_sz_max = 0, 206 .dest_nentries = 0, 207 }, 208 209 /* CE11: target autonomous hif memcpy */ 210 { 211 .flags = CE_ATTR_FLAGS, 212 .src_nentries = 0, 213 .src_sz_max = 0, 214 .dest_nentries = 0, 215 }, 216 }; 217 218 /* Target firmware's Copy Engine configuration. */ 219 static struct ce_pipe_config target_ce_config_wlan[] = { 220 /* CE0: host->target HTC control and raw streams */ 221 { 222 .pipenum = __cpu_to_le32(0), 223 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 224 .nentries = __cpu_to_le32(32), 225 .nbytes_max = __cpu_to_le32(256), 226 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 227 .reserved = __cpu_to_le32(0), 228 }, 229 230 /* CE1: target->host HTT + HTC control */ 231 { 232 .pipenum = __cpu_to_le32(1), 233 .pipedir = __cpu_to_le32(PIPEDIR_IN), 234 .nentries = __cpu_to_le32(32), 235 .nbytes_max = __cpu_to_le32(2048), 236 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 237 .reserved = __cpu_to_le32(0), 238 }, 239 240 /* CE2: target->host WMI */ 241 { 242 .pipenum = __cpu_to_le32(2), 243 .pipedir = __cpu_to_le32(PIPEDIR_IN), 244 .nentries = __cpu_to_le32(64), 245 .nbytes_max = __cpu_to_le32(2048), 246 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 247 .reserved = __cpu_to_le32(0), 248 }, 249 250 /* CE3: host->target WMI */ 251 { 252 .pipenum = __cpu_to_le32(3), 253 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 254 .nentries = __cpu_to_le32(32), 255 .nbytes_max = __cpu_to_le32(2048), 256 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 257 .reserved = __cpu_to_le32(0), 258 }, 259 260 /* CE4: host->target HTT */ 261 { 262 .pipenum = __cpu_to_le32(4), 263 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 264 .nentries = __cpu_to_le32(256), 265 .nbytes_max = __cpu_to_le32(256), 266 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 267 .reserved = __cpu_to_le32(0), 268 }, 269 270 /* NB: 50% of src nentries, since tx has 2 frags */ 271 272 /* CE5: target->host HTT (HIF->HTT) */ 273 { 274 .pipenum = __cpu_to_le32(5), 275 .pipedir = __cpu_to_le32(PIPEDIR_IN), 276 .nentries = __cpu_to_le32(32), 277 .nbytes_max = __cpu_to_le32(512), 278 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 279 .reserved = __cpu_to_le32(0), 280 }, 281 282 /* CE6: Reserved for target autonomous hif_memcpy */ 283 { 284 .pipenum = __cpu_to_le32(6), 285 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 286 .nentries = __cpu_to_le32(32), 287 .nbytes_max = __cpu_to_le32(4096), 288 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 289 .reserved = __cpu_to_le32(0), 290 }, 291 292 /* CE7 used only by Host */ 293 { 294 .pipenum = __cpu_to_le32(7), 295 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 296 .nentries = __cpu_to_le32(0), 297 .nbytes_max = __cpu_to_le32(0), 298 .flags = __cpu_to_le32(0), 299 .reserved = __cpu_to_le32(0), 300 }, 301 302 /* CE8 target->host packtlog */ 303 { 304 .pipenum = __cpu_to_le32(8), 305 .pipedir = __cpu_to_le32(PIPEDIR_IN), 306 .nentries = __cpu_to_le32(64), 307 .nbytes_max = __cpu_to_le32(2048), 308 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 309 .reserved = __cpu_to_le32(0), 310 }, 311 312 /* CE9 target autonomous qcache memcpy */ 313 { 314 .pipenum = __cpu_to_le32(9), 315 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 316 .nentries = 
__cpu_to_le32(32), 317 .nbytes_max = __cpu_to_le32(2048), 318 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 319 .reserved = __cpu_to_le32(0), 320 }, 321 322 /* It not necessary to send target wlan configuration for CE10 & CE11 323 * as these CEs are not actively used in target. 324 */ 325 }; 326 327 /* 328 * Map from service/endpoint to Copy Engine. 329 * This table is derived from the CE_PCI TABLE, above. 330 * It is passed to the Target at startup for use by firmware. 331 */ 332 static struct service_to_pipe target_service_to_ce_map_wlan[] = { 333 { 334 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 335 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 336 __cpu_to_le32(3), 337 }, 338 { 339 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 340 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 341 __cpu_to_le32(2), 342 }, 343 { 344 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), 345 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 346 __cpu_to_le32(3), 347 }, 348 { 349 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), 350 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 351 __cpu_to_le32(2), 352 }, 353 { 354 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), 355 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 356 __cpu_to_le32(3), 357 }, 358 { 359 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), 360 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 361 __cpu_to_le32(2), 362 }, 363 { 364 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), 365 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 366 __cpu_to_le32(3), 367 }, 368 { 369 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), 370 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 371 __cpu_to_le32(2), 372 }, 373 { 374 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), 375 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 376 __cpu_to_le32(3), 377 }, 378 { 379 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), 380 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 381 __cpu_to_le32(2), 382 }, 383 { 384 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), 385 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 386 __cpu_to_le32(0), 387 }, 388 { 389 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), 390 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 391 __cpu_to_le32(1), 392 }, 393 { /* not used */ 394 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), 395 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 396 __cpu_to_le32(0), 397 }, 398 { /* not used */ 399 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), 400 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 401 __cpu_to_le32(1), 402 }, 403 { 404 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), 405 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 406 __cpu_to_le32(4), 407 }, 408 { 409 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), 410 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 411 __cpu_to_le32(5), 412 }, 413 414 /* (Additions here) */ 415 416 { /* must be last */ 417 __cpu_to_le32(0), 418 __cpu_to_le32(0), 419 __cpu_to_le32(0), 420 }, 421 }; 422 423 static bool ath10k_pci_is_awake(struct ath10k *ar) 424 { 425 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 426 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 427 RTC_STATE_ADDRESS); 428 429 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; 430 } 431 432 static void __ath10k_pci_wake(struct ath10k *ar) 433 { 434 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 435 436 lockdep_assert_held(&ar_pci->ps_lock); 437 438 
	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
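	 *
	 * A minimal usage sketch (assumed from how ath10k_pci_read32() and
	 * ath10k_pci_write32() below use this pair):
	 *
	 *   if (ath10k_pci_wake(ar) == 0) {
	 *           val = ioread32(ar_pci->mem + offset);
	 *           ath10k_pci_sleep(ar);
	 *   }
	 *
	 * Every successful wake must be balanced by a sleep so the refcount
	 * can drop back to zero and the grace-period timer can let the
	 * device doze again.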
541 */ 542 if (!ar_pci->ps_awake) { 543 __ath10k_pci_wake(ar); 544 545 ret = ath10k_pci_wake_wait(ar); 546 if (ret == 0) 547 ar_pci->ps_awake = true; 548 } 549 550 if (ret == 0) { 551 ar_pci->ps_wake_refcount++; 552 WARN_ON(ar_pci->ps_wake_refcount == 0); 553 } 554 555 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 556 557 return ret; 558 } 559 560 static void ath10k_pci_sleep(struct ath10k *ar) 561 { 562 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 563 unsigned long flags; 564 565 if (ar_pci->pci_ps == 0) 566 return; 567 568 spin_lock_irqsave(&ar_pci->ps_lock, flags); 569 570 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", 571 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 572 573 if (WARN_ON(ar_pci->ps_wake_refcount == 0)) 574 goto skip; 575 576 ar_pci->ps_wake_refcount--; 577 578 mod_timer(&ar_pci->ps_timer, jiffies + 579 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); 580 581 skip: 582 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 583 } 584 585 static void ath10k_pci_ps_timer(unsigned long ptr) 586 { 587 struct ath10k *ar = (void *)ptr; 588 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 589 unsigned long flags; 590 591 spin_lock_irqsave(&ar_pci->ps_lock, flags); 592 593 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", 594 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 595 596 if (ar_pci->ps_wake_refcount > 0) 597 goto skip; 598 599 __ath10k_pci_sleep(ar); 600 601 skip: 602 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 603 } 604 605 static void ath10k_pci_sleep_sync(struct ath10k *ar) 606 { 607 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 608 unsigned long flags; 609 610 if (ar_pci->pci_ps == 0) { 611 ath10k_pci_force_sleep(ar); 612 return; 613 } 614 615 del_timer_sync(&ar_pci->ps_timer); 616 617 spin_lock_irqsave(&ar_pci->ps_lock, flags); 618 WARN_ON(ar_pci->ps_wake_refcount > 0); 619 __ath10k_pci_sleep(ar); 620 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 621 } 622 623 void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) 624 { 625 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 626 int ret; 627 628 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) { 629 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 630 offset, offset + sizeof(value), ar_pci->mem_len); 631 return; 632 } 633 634 ret = ath10k_pci_wake(ar); 635 if (ret) { 636 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", 637 value, offset, ret); 638 return; 639 } 640 641 iowrite32(value, ar_pci->mem + offset); 642 ath10k_pci_sleep(ar); 643 } 644 645 u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) 646 { 647 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 648 u32 val; 649 int ret; 650 651 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) { 652 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 653 offset, offset + sizeof(val), ar_pci->mem_len); 654 return 0; 655 } 656 657 ret = ath10k_pci_wake(ar); 658 if (ret) { 659 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", 660 offset, ret); 661 return 0xffffffff; 662 } 663 664 val = ioread32(ar_pci->mem + offset); 665 ath10k_pci_sleep(ar); 666 667 return val; 668 } 669 670 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) 671 { 672 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); 673 } 674 675 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) 676 { 677 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); 678 } 679 680 u32 
ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) 681 { 682 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); 683 } 684 685 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) 686 { 687 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); 688 } 689 690 static bool ath10k_pci_irq_pending(struct ath10k *ar) 691 { 692 u32 cause; 693 694 /* Check if the shared legacy irq is for us */ 695 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 696 PCIE_INTR_CAUSE_ADDRESS); 697 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) 698 return true; 699 700 return false; 701 } 702 703 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) 704 { 705 /* IMPORTANT: INTR_CLR register has to be set after 706 * INTR_ENABLE is set to 0, otherwise interrupt can not be 707 * really cleared. */ 708 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 709 0); 710 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, 711 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 712 713 /* IMPORTANT: this extra read transaction is required to 714 * flush the posted write buffer. */ 715 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 716 PCIE_INTR_ENABLE_ADDRESS); 717 } 718 719 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) 720 { 721 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 722 PCIE_INTR_ENABLE_ADDRESS, 723 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 724 725 /* IMPORTANT: this extra read transaction is required to 726 * flush the posted write buffer. */ 727 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 728 PCIE_INTR_ENABLE_ADDRESS); 729 } 730 731 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) 732 { 733 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 734 735 if (ar_pci->num_msi_intrs > 1) 736 return "msi-x"; 737 738 if (ar_pci->num_msi_intrs == 1) 739 return "msi"; 740 741 return "legacy"; 742 } 743 744 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) 745 { 746 struct ath10k *ar = pipe->hif_ce_state; 747 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 748 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 749 struct sk_buff *skb; 750 dma_addr_t paddr; 751 int ret; 752 753 skb = dev_alloc_skb(pipe->buf_sz); 754 if (!skb) 755 return -ENOMEM; 756 757 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); 758 759 paddr = dma_map_single(ar->dev, skb->data, 760 skb->len + skb_tailroom(skb), 761 DMA_FROM_DEVICE); 762 if (unlikely(dma_mapping_error(ar->dev, paddr))) { 763 ath10k_warn(ar, "failed to dma map pci rx buf\n"); 764 dev_kfree_skb_any(skb); 765 return -EIO; 766 } 767 768 ATH10K_SKB_RXCB(skb)->paddr = paddr; 769 770 spin_lock_bh(&ar_pci->ce_lock); 771 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); 772 spin_unlock_bh(&ar_pci->ce_lock); 773 if (ret) { 774 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), 775 DMA_FROM_DEVICE); 776 dev_kfree_skb_any(skb); 777 return ret; 778 } 779 780 return 0; 781 } 782 783 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) 784 { 785 struct ath10k *ar = pipe->hif_ce_state; 786 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 787 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 788 int ret, num; 789 790 if (pipe->buf_sz == 0) 791 return; 792 793 if (!ce_pipe->dest_ring) 794 return; 795 796 spin_lock_bh(&ar_pci->ce_lock); 797 num = __ath10k_ce_rx_num_free_bufs(ce_pipe); 798 spin_unlock_bh(&ar_pci->ce_lock); 799 while (num--) { 800 ret = __ath10k_pci_rx_post_buf(pipe); 801 if (ret) { 802 if 
(ret == -ENOSPC) 803 break; 804 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); 805 mod_timer(&ar_pci->rx_post_retry, jiffies + 806 ATH10K_PCI_RX_POST_RETRY_MS); 807 break; 808 } 809 } 810 } 811 812 static void ath10k_pci_rx_post(struct ath10k *ar) 813 { 814 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 815 int i; 816 817 for (i = 0; i < CE_COUNT; i++) 818 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); 819 } 820 821 static void ath10k_pci_rx_replenish_retry(unsigned long ptr) 822 { 823 struct ath10k *ar = (void *)ptr; 824 825 ath10k_pci_rx_post(ar); 826 } 827 828 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 829 { 830 u32 val = 0; 831 832 switch (ar->hw_rev) { 833 case ATH10K_HW_QCA988X: 834 case ATH10K_HW_QCA6174: 835 case ATH10K_HW_QCA9377: 836 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 837 CORE_CTRL_ADDRESS) & 838 0x7ff) << 21; 839 break; 840 case ATH10K_HW_QCA99X0: 841 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); 842 break; 843 } 844 845 val |= 0x100000 | (addr & 0xfffff); 846 return val; 847 } 848 849 /* 850 * Diagnostic read/write access is provided for startup/config/debug usage. 851 * Caller must guarantee proper alignment, when applicable, and single user 852 * at any moment. 853 */ 854 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, 855 int nbytes) 856 { 857 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 858 int ret = 0; 859 u32 buf; 860 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 861 unsigned int id; 862 unsigned int flags; 863 struct ath10k_ce_pipe *ce_diag; 864 /* Host buffer address in CE space */ 865 u32 ce_data; 866 dma_addr_t ce_data_base = 0; 867 void *data_buf = NULL; 868 int i; 869 870 spin_lock_bh(&ar_pci->ce_lock); 871 872 ce_diag = ar_pci->ce_diag; 873 874 /* 875 * Allocate a temporary bounce buffer to hold caller's data 876 * to be DMA'ed from Target. This guarantees 877 * 1) 4-byte alignment 878 * 2) Buffer in DMA-able space 879 */ 880 orig_nbytes = nbytes; 881 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 882 orig_nbytes, 883 &ce_data_base, 884 GFP_ATOMIC); 885 886 if (!data_buf) { 887 ret = -ENOMEM; 888 goto done; 889 } 890 memset(data_buf, 0, orig_nbytes); 891 892 remaining_bytes = orig_nbytes; 893 ce_data = ce_data_base; 894 while (remaining_bytes) { 895 nbytes = min_t(unsigned int, remaining_bytes, 896 DIAG_TRANSFER_LIMIT); 897 898 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); 899 if (ret != 0) 900 goto done; 901 902 /* Request CE to send from Target(!) address to Host buffer */ 903 /* 904 * The address supplied by the caller is in the 905 * Target CPU virtual address space. 
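	 *
	 * For the QCA988X/QCA6174/QCA9377 family the conversion done by
	 * ath10k_pci_targ_cpu_to_ce_addr() above boils down to roughly:
	 *
	 *   region  = (CORE_CTRL & 0x7ff) << 21;
	 *   ce_addr = region | 0x100000 | (addr & 0xfffff);
	 *
	 * i.e. the low 20 bits of the CPU address are kept and the upper
	 * window bits come from the CORE_CTRL register (QCA99X0 uses the
	 * PCIE_BAR register instead).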
906 * 907 * In order to use this address with the diagnostic CE, 908 * convert it from Target CPU virtual address space 909 * to CE address space 910 */ 911 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 912 913 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, 914 0); 915 if (ret) 916 goto done; 917 918 i = 0; 919 while (ath10k_ce_completed_send_next_nolock(ce_diag, 920 NULL) != 0) { 921 mdelay(1); 922 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 923 ret = -EBUSY; 924 goto done; 925 } 926 } 927 928 i = 0; 929 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, 930 &completed_nbytes, 931 &id, &flags) != 0) { 932 mdelay(1); 933 934 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 935 ret = -EBUSY; 936 goto done; 937 } 938 } 939 940 if (nbytes != completed_nbytes) { 941 ret = -EIO; 942 goto done; 943 } 944 945 if (buf != ce_data) { 946 ret = -EIO; 947 goto done; 948 } 949 950 remaining_bytes -= nbytes; 951 address += nbytes; 952 ce_data += nbytes; 953 } 954 955 done: 956 if (ret == 0) 957 memcpy(data, data_buf, orig_nbytes); 958 else 959 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", 960 address, ret); 961 962 if (data_buf) 963 dma_free_coherent(ar->dev, orig_nbytes, data_buf, 964 ce_data_base); 965 966 spin_unlock_bh(&ar_pci->ce_lock); 967 968 return ret; 969 } 970 971 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) 972 { 973 __le32 val = 0; 974 int ret; 975 976 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); 977 *value = __le32_to_cpu(val); 978 979 return ret; 980 } 981 982 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, 983 u32 src, u32 len) 984 { 985 u32 host_addr, addr; 986 int ret; 987 988 host_addr = host_interest_item_address(src); 989 990 ret = ath10k_pci_diag_read32(ar, host_addr, &addr); 991 if (ret != 0) { 992 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", 993 src, ret); 994 return ret; 995 } 996 997 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); 998 if (ret != 0) { 999 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", 1000 addr, len, ret); 1001 return ret; 1002 } 1003 1004 return 0; 1005 } 1006 1007 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ 1008 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) 1009 1010 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, 1011 const void *data, int nbytes) 1012 { 1013 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1014 int ret = 0; 1015 u32 buf; 1016 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 1017 unsigned int id; 1018 unsigned int flags; 1019 struct ath10k_ce_pipe *ce_diag; 1020 void *data_buf = NULL; 1021 u32 ce_data; /* Host buffer address in CE space */ 1022 dma_addr_t ce_data_base = 0; 1023 int i; 1024 1025 spin_lock_bh(&ar_pci->ce_lock); 1026 1027 ce_diag = ar_pci->ce_diag; 1028 1029 /* 1030 * Allocate a temporary bounce buffer to hold caller's data 1031 * to be DMA'ed to Target. This guarantees 1032 * 1) 4-byte alignment 1033 * 2) Buffer in DMA-able space 1034 */ 1035 orig_nbytes = nbytes; 1036 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 1037 orig_nbytes, 1038 &ce_data_base, 1039 GFP_ATOMIC); 1040 if (!data_buf) { 1041 ret = -ENOMEM; 1042 goto done; 1043 } 1044 1045 /* Copy caller's data to allocated DMA buf */ 1046 memcpy(data_buf, data, orig_nbytes); 1047 1048 /* 1049 * The address supplied by the caller is in the 1050 * Target CPU virtual address space. 
1051 * 1052 * In order to use this address with the diagnostic CE, 1053 * convert it from 1054 * Target CPU virtual address space 1055 * to 1056 * CE address space 1057 */ 1058 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 1059 1060 remaining_bytes = orig_nbytes; 1061 ce_data = ce_data_base; 1062 while (remaining_bytes) { 1063 /* FIXME: check cast */ 1064 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); 1065 1066 /* Set up to receive directly into Target(!) address */ 1067 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address); 1068 if (ret != 0) 1069 goto done; 1070 1071 /* 1072 * Request CE to send caller-supplied data that 1073 * was copied to bounce buffer to Target(!) address. 1074 */ 1075 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, 1076 nbytes, 0, 0); 1077 if (ret != 0) 1078 goto done; 1079 1080 i = 0; 1081 while (ath10k_ce_completed_send_next_nolock(ce_diag, 1082 NULL) != 0) { 1083 mdelay(1); 1084 1085 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 1086 ret = -EBUSY; 1087 goto done; 1088 } 1089 } 1090 1091 i = 0; 1092 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, 1093 &completed_nbytes, 1094 &id, &flags) != 0) { 1095 mdelay(1); 1096 1097 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 1098 ret = -EBUSY; 1099 goto done; 1100 } 1101 } 1102 1103 if (nbytes != completed_nbytes) { 1104 ret = -EIO; 1105 goto done; 1106 } 1107 1108 if (buf != address) { 1109 ret = -EIO; 1110 goto done; 1111 } 1112 1113 remaining_bytes -= nbytes; 1114 address += nbytes; 1115 ce_data += nbytes; 1116 } 1117 1118 done: 1119 if (data_buf) { 1120 dma_free_coherent(ar->dev, orig_nbytes, data_buf, 1121 ce_data_base); 1122 } 1123 1124 if (ret != 0) 1125 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", 1126 address, ret); 1127 1128 spin_unlock_bh(&ar_pci->ce_lock); 1129 1130 return ret; 1131 } 1132 1133 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) 1134 { 1135 __le32 val = __cpu_to_le32(value); 1136 1137 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); 1138 } 1139 1140 /* Called by lower (CE) layer when a send to Target completes. 
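 *
 * Completed send buffers are first collected on a local list and only
 * then passed to ath10k_htc_tx_completion_handler(), so the HTC
 * completion handler runs outside the CE completion loop.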
*/ 1141 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state) 1142 { 1143 struct ath10k *ar = ce_state->ar; 1144 struct sk_buff_head list; 1145 struct sk_buff *skb; 1146 1147 __skb_queue_head_init(&list); 1148 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { 1149 /* no need to call tx completion for NULL pointers */ 1150 if (skb == NULL) 1151 continue; 1152 1153 __skb_queue_tail(&list, skb); 1154 } 1155 1156 while ((skb = __skb_dequeue(&list))) 1157 ath10k_htc_tx_completion_handler(ar, skb); 1158 } 1159 1160 static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, 1161 void (*callback)(struct ath10k *ar, 1162 struct sk_buff *skb)) 1163 { 1164 struct ath10k *ar = ce_state->ar; 1165 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1166 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 1167 struct sk_buff *skb; 1168 struct sk_buff_head list; 1169 void *transfer_context; 1170 u32 ce_data; 1171 unsigned int nbytes, max_nbytes; 1172 unsigned int transfer_id; 1173 unsigned int flags; 1174 1175 __skb_queue_head_init(&list); 1176 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, 1177 &ce_data, &nbytes, &transfer_id, 1178 &flags) == 0) { 1179 skb = transfer_context; 1180 max_nbytes = skb->len + skb_tailroom(skb); 1181 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1182 max_nbytes, DMA_FROM_DEVICE); 1183 1184 if (unlikely(max_nbytes < nbytes)) { 1185 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", 1186 nbytes, max_nbytes); 1187 dev_kfree_skb_any(skb); 1188 continue; 1189 } 1190 1191 skb_put(skb, nbytes); 1192 __skb_queue_tail(&list, skb); 1193 } 1194 1195 while ((skb = __skb_dequeue(&list))) { 1196 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", 1197 ce_state->id, skb->len); 1198 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", 1199 skb->data, skb->len); 1200 1201 callback(ar, skb); 1202 } 1203 1204 ath10k_pci_rx_post_pipe(pipe_info); 1205 } 1206 1207 /* Called by lower (CE) layer when data is received from the Target. */ 1208 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1209 { 1210 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1211 } 1212 1213 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1214 { 1215 /* CE4 polling needs to be done whenever CE pipe which transports 1216 * HTT Rx (target->host) is processed. 1217 */ 1218 ath10k_ce_per_engine_service(ce_state->ar, 4); 1219 1220 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1221 } 1222 1223 /* Called by lower (CE) layer when data is received from the Target. 1224 * Only 10.4 firmware uses separate CE to transfer pktlog data. 1225 */ 1226 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) 1227 { 1228 ath10k_pci_process_rx_cb(ce_state, 1229 ath10k_htt_rx_pktlog_completion_handler); 1230 } 1231 1232 /* Called by lower (CE) layer when a send to HTT Target completes. 
*/ 1233 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) 1234 { 1235 struct ath10k *ar = ce_state->ar; 1236 struct sk_buff *skb; 1237 1238 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { 1239 /* no need to call tx completion for NULL pointers */ 1240 if (!skb) 1241 continue; 1242 1243 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 1244 skb->len, DMA_TO_DEVICE); 1245 ath10k_htt_hif_tx_complete(ar, skb); 1246 } 1247 } 1248 1249 static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) 1250 { 1251 skb_pull(skb, sizeof(struct ath10k_htc_hdr)); 1252 ath10k_htt_t2h_msg_handler(ar, skb); 1253 } 1254 1255 /* Called by lower (CE) layer when HTT data is received from the Target. */ 1256 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) 1257 { 1258 /* CE4 polling needs to be done whenever CE pipe which transports 1259 * HTT Rx (target->host) is processed. 1260 */ 1261 ath10k_ce_per_engine_service(ce_state->ar, 4); 1262 1263 ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); 1264 } 1265 1266 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1267 struct ath10k_hif_sg_item *items, int n_items) 1268 { 1269 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1270 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; 1271 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; 1272 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; 1273 unsigned int nentries_mask; 1274 unsigned int sw_index; 1275 unsigned int write_index; 1276 int err, i = 0; 1277 1278 spin_lock_bh(&ar_pci->ce_lock); 1279 1280 nentries_mask = src_ring->nentries_mask; 1281 sw_index = src_ring->sw_index; 1282 write_index = src_ring->write_index; 1283 1284 if (unlikely(CE_RING_DELTA(nentries_mask, 1285 write_index, sw_index - 1) < n_items)) { 1286 err = -ENOBUFS; 1287 goto err; 1288 } 1289 1290 for (i = 0; i < n_items - 1; i++) { 1291 ath10k_dbg(ar, ATH10K_DBG_PCI, 1292 "pci tx item %d paddr 0x%08x len %d n_items %d\n", 1293 i, items[i].paddr, items[i].len, n_items); 1294 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1295 items[i].vaddr, items[i].len); 1296 1297 err = ath10k_ce_send_nolock(ce_pipe, 1298 items[i].transfer_context, 1299 items[i].paddr, 1300 items[i].len, 1301 items[i].transfer_id, 1302 CE_SEND_FLAG_GATHER); 1303 if (err) 1304 goto err; 1305 } 1306 1307 /* `i` is equal to `n_items -1` after for() */ 1308 1309 ath10k_dbg(ar, ATH10K_DBG_PCI, 1310 "pci tx item %d paddr 0x%08x len %d n_items %d\n", 1311 i, items[i].paddr, items[i].len, n_items); 1312 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1313 items[i].vaddr, items[i].len); 1314 1315 err = ath10k_ce_send_nolock(ce_pipe, 1316 items[i].transfer_context, 1317 items[i].paddr, 1318 items[i].len, 1319 items[i].transfer_id, 1320 0); 1321 if (err) 1322 goto err; 1323 1324 spin_unlock_bh(&ar_pci->ce_lock); 1325 return 0; 1326 1327 err: 1328 for (; i > 0; i--) 1329 __ath10k_ce_send_revert(ce_pipe); 1330 1331 spin_unlock_bh(&ar_pci->ce_lock); 1332 return err; 1333 } 1334 1335 static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1336 size_t buf_len) 1337 { 1338 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); 1339 } 1340 1341 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 1342 { 1343 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1344 1345 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); 1346 1347 return 
ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); 1348 } 1349 1350 static void ath10k_pci_dump_registers(struct ath10k *ar, 1351 struct ath10k_fw_crash_data *crash_data) 1352 { 1353 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; 1354 int i, ret; 1355 1356 lockdep_assert_held(&ar->data_lock); 1357 1358 ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], 1359 hi_failure_state, 1360 REG_DUMP_COUNT_QCA988X * sizeof(__le32)); 1361 if (ret) { 1362 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); 1363 return; 1364 } 1365 1366 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); 1367 1368 ath10k_err(ar, "firmware register dump:\n"); 1369 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) 1370 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", 1371 i, 1372 __le32_to_cpu(reg_dump_values[i]), 1373 __le32_to_cpu(reg_dump_values[i + 1]), 1374 __le32_to_cpu(reg_dump_values[i + 2]), 1375 __le32_to_cpu(reg_dump_values[i + 3])); 1376 1377 if (!crash_data) 1378 return; 1379 1380 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) 1381 crash_data->registers[i] = reg_dump_values[i]; 1382 } 1383 1384 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) 1385 { 1386 struct ath10k_fw_crash_data *crash_data; 1387 char uuid[50]; 1388 1389 spin_lock_bh(&ar->data_lock); 1390 1391 ar->stats.fw_crash_counter++; 1392 1393 crash_data = ath10k_debug_get_new_fw_crash_data(ar); 1394 1395 if (crash_data) 1396 scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid); 1397 else 1398 scnprintf(uuid, sizeof(uuid), "n/a"); 1399 1400 ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); 1401 ath10k_print_driver_info(ar); 1402 ath10k_pci_dump_registers(ar, crash_data); 1403 1404 spin_unlock_bh(&ar->data_lock); 1405 1406 queue_work(ar->workqueue, &ar->restart_work); 1407 } 1408 1409 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 1410 int force) 1411 { 1412 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); 1413 1414 if (!force) { 1415 int resources; 1416 /* 1417 * Decide whether to actually poll for completions, or just 1418 * wait for a later chance. 1419 * If there seem to be plenty of resources left, then just wait 1420 * since checking involves reading a CE register, which is a 1421 * relatively expensive operation. 1422 */ 1423 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); 1424 1425 /* 1426 * If at least 50% of the total resources are still available, 1427 * don't bother checking again yet. 
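		 *
		 * For example, CE3 (host->target WMI) has src_nentries = 32 in
		 * host_ce_config_wlan[], so it is only serviced here once 16
		 * or fewer send slots remain free.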
1428 */ 1429 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) 1430 return; 1431 } 1432 ath10k_ce_per_engine_service(ar, pipe); 1433 } 1434 1435 static void ath10k_pci_kill_tasklet(struct ath10k *ar) 1436 { 1437 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1438 int i; 1439 1440 tasklet_kill(&ar_pci->intr_tq); 1441 tasklet_kill(&ar_pci->msi_fw_err); 1442 1443 for (i = 0; i < CE_COUNT; i++) 1444 tasklet_kill(&ar_pci->pipe_info[i].intr); 1445 1446 del_timer_sync(&ar_pci->rx_post_retry); 1447 } 1448 1449 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, 1450 u8 *ul_pipe, u8 *dl_pipe) 1451 { 1452 const struct service_to_pipe *entry; 1453 bool ul_set = false, dl_set = false; 1454 int i; 1455 1456 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); 1457 1458 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { 1459 entry = &target_service_to_ce_map_wlan[i]; 1460 1461 if (__le32_to_cpu(entry->service_id) != service_id) 1462 continue; 1463 1464 switch (__le32_to_cpu(entry->pipedir)) { 1465 case PIPEDIR_NONE: 1466 break; 1467 case PIPEDIR_IN: 1468 WARN_ON(dl_set); 1469 *dl_pipe = __le32_to_cpu(entry->pipenum); 1470 dl_set = true; 1471 break; 1472 case PIPEDIR_OUT: 1473 WARN_ON(ul_set); 1474 *ul_pipe = __le32_to_cpu(entry->pipenum); 1475 ul_set = true; 1476 break; 1477 case PIPEDIR_INOUT: 1478 WARN_ON(dl_set); 1479 WARN_ON(ul_set); 1480 *dl_pipe = __le32_to_cpu(entry->pipenum); 1481 *ul_pipe = __le32_to_cpu(entry->pipenum); 1482 dl_set = true; 1483 ul_set = true; 1484 break; 1485 } 1486 } 1487 1488 if (WARN_ON(!ul_set || !dl_set)) 1489 return -ENOENT; 1490 1491 return 0; 1492 } 1493 1494 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, 1495 u8 *ul_pipe, u8 *dl_pipe) 1496 { 1497 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); 1498 1499 (void)ath10k_pci_hif_map_service_to_pipe(ar, 1500 ATH10K_HTC_SVC_ID_RSVD_CTRL, 1501 ul_pipe, dl_pipe); 1502 } 1503 1504 static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) 1505 { 1506 u32 val; 1507 1508 switch (ar->hw_rev) { 1509 case ATH10K_HW_QCA988X: 1510 case ATH10K_HW_QCA6174: 1511 case ATH10K_HW_QCA9377: 1512 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1513 CORE_CTRL_ADDRESS); 1514 val &= ~CORE_CTRL_PCIE_REG_31_MASK; 1515 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1516 CORE_CTRL_ADDRESS, val); 1517 break; 1518 case ATH10K_HW_QCA99X0: 1519 /* TODO: Find appropriate register configuration for QCA99X0 1520 * to mask irq/MSI. 1521 */ 1522 break; 1523 } 1524 } 1525 1526 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) 1527 { 1528 u32 val; 1529 1530 switch (ar->hw_rev) { 1531 case ATH10K_HW_QCA988X: 1532 case ATH10K_HW_QCA6174: 1533 case ATH10K_HW_QCA9377: 1534 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1535 CORE_CTRL_ADDRESS); 1536 val |= CORE_CTRL_PCIE_REG_31_MASK; 1537 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1538 CORE_CTRL_ADDRESS, val); 1539 break; 1540 case ATH10K_HW_QCA99X0: 1541 /* TODO: Find appropriate register configuration for QCA99X0 1542 * to unmask irq/MSI. 
1543 */ 1544 break; 1545 } 1546 } 1547 1548 static void ath10k_pci_irq_disable(struct ath10k *ar) 1549 { 1550 ath10k_ce_disable_interrupts(ar); 1551 ath10k_pci_disable_and_clear_legacy_irq(ar); 1552 ath10k_pci_irq_msi_fw_mask(ar); 1553 } 1554 1555 static void ath10k_pci_irq_sync(struct ath10k *ar) 1556 { 1557 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1558 int i; 1559 1560 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) 1561 synchronize_irq(ar_pci->pdev->irq + i); 1562 } 1563 1564 static void ath10k_pci_irq_enable(struct ath10k *ar) 1565 { 1566 ath10k_ce_enable_interrupts(ar); 1567 ath10k_pci_enable_legacy_irq(ar); 1568 ath10k_pci_irq_msi_fw_unmask(ar); 1569 } 1570 1571 static int ath10k_pci_hif_start(struct ath10k *ar) 1572 { 1573 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1574 1575 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); 1576 1577 ath10k_pci_irq_enable(ar); 1578 ath10k_pci_rx_post(ar); 1579 1580 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, 1581 ar_pci->link_ctl); 1582 1583 return 0; 1584 } 1585 1586 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 1587 { 1588 struct ath10k *ar; 1589 struct ath10k_ce_pipe *ce_pipe; 1590 struct ath10k_ce_ring *ce_ring; 1591 struct sk_buff *skb; 1592 int i; 1593 1594 ar = pci_pipe->hif_ce_state; 1595 ce_pipe = pci_pipe->ce_hdl; 1596 ce_ring = ce_pipe->dest_ring; 1597 1598 if (!ce_ring) 1599 return; 1600 1601 if (!pci_pipe->buf_sz) 1602 return; 1603 1604 for (i = 0; i < ce_ring->nentries; i++) { 1605 skb = ce_ring->per_transfer_context[i]; 1606 if (!skb) 1607 continue; 1608 1609 ce_ring->per_transfer_context[i] = NULL; 1610 1611 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1612 skb->len + skb_tailroom(skb), 1613 DMA_FROM_DEVICE); 1614 dev_kfree_skb_any(skb); 1615 } 1616 } 1617 1618 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 1619 { 1620 struct ath10k *ar; 1621 struct ath10k_pci *ar_pci; 1622 struct ath10k_ce_pipe *ce_pipe; 1623 struct ath10k_ce_ring *ce_ring; 1624 struct sk_buff *skb; 1625 int i; 1626 1627 ar = pci_pipe->hif_ce_state; 1628 ar_pci = ath10k_pci_priv(ar); 1629 ce_pipe = pci_pipe->ce_hdl; 1630 ce_ring = ce_pipe->src_ring; 1631 1632 if (!ce_ring) 1633 return; 1634 1635 if (!pci_pipe->buf_sz) 1636 return; 1637 1638 for (i = 0; i < ce_ring->nentries; i++) { 1639 skb = ce_ring->per_transfer_context[i]; 1640 if (!skb) 1641 continue; 1642 1643 ce_ring->per_transfer_context[i] = NULL; 1644 1645 ath10k_htc_tx_completion_handler(ar, skb); 1646 } 1647 } 1648 1649 /* 1650 * Cleanup residual buffers for device shutdown: 1651 * buffers that were enqueued for receive 1652 * buffers that were to be sent 1653 * Note: Buffers that had completed but which were 1654 * not yet processed are on a completion queue. They 1655 * are handled when the completion thread shuts down. 
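 *
 * Rx buffers are simply DMA-unmapped and freed; Tx buffers are handed
 * back through the regular HTC Tx completion path (see the two pipe
 * cleanup helpers above).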
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
1777 unsigned int unused_id; 1778 1779 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, 1780 &unused_nbytes, &unused_id); 1781 } else { 1782 /* non-zero means we did not time out */ 1783 ret = 0; 1784 } 1785 1786 err_resp: 1787 if (resp) { 1788 u32 unused_buffer; 1789 1790 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); 1791 dma_unmap_single(ar->dev, resp_paddr, 1792 *resp_len, DMA_FROM_DEVICE); 1793 } 1794 err_req: 1795 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); 1796 1797 if (ret == 0 && resp_len) { 1798 *resp_len = min(*resp_len, xfer.resp_len); 1799 memcpy(resp, tresp, xfer.resp_len); 1800 } 1801 err_dma: 1802 kfree(treq); 1803 kfree(tresp); 1804 1805 return ret; 1806 } 1807 1808 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) 1809 { 1810 struct bmi_xfer *xfer; 1811 1812 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer)) 1813 return; 1814 1815 xfer->tx_done = true; 1816 } 1817 1818 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) 1819 { 1820 struct ath10k *ar = ce_state->ar; 1821 struct bmi_xfer *xfer; 1822 u32 ce_data; 1823 unsigned int nbytes; 1824 unsigned int transfer_id; 1825 unsigned int flags; 1826 1827 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data, 1828 &nbytes, &transfer_id, &flags)) 1829 return; 1830 1831 if (WARN_ON_ONCE(!xfer)) 1832 return; 1833 1834 if (!xfer->wait_for_resp) { 1835 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); 1836 return; 1837 } 1838 1839 xfer->resp_len = nbytes; 1840 xfer->rx_done = true; 1841 } 1842 1843 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 1844 struct ath10k_ce_pipe *rx_pipe, 1845 struct bmi_xfer *xfer) 1846 { 1847 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; 1848 1849 while (time_before_eq(jiffies, timeout)) { 1850 ath10k_pci_bmi_send_done(tx_pipe); 1851 ath10k_pci_bmi_recv_data(rx_pipe); 1852 1853 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) 1854 return 0; 1855 1856 schedule(); 1857 } 1858 1859 return -ETIMEDOUT; 1860 } 1861 1862 /* 1863 * Send an interrupt to the device to wake up the Target CPU 1864 * so it has an opportunity to notice any changed state. 
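 *
 * This is done with a read-modify-write of the CORE_CTRL register,
 * setting CORE_CTRL_CPU_INTR_MASK while leaving the other bits alone.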
1865 */ 1866 static int ath10k_pci_wake_target_cpu(struct ath10k *ar) 1867 { 1868 u32 addr, val; 1869 1870 addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; 1871 val = ath10k_pci_read32(ar, addr); 1872 val |= CORE_CTRL_CPU_INTR_MASK; 1873 ath10k_pci_write32(ar, addr, val); 1874 1875 return 0; 1876 } 1877 1878 static int ath10k_pci_get_num_banks(struct ath10k *ar) 1879 { 1880 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1881 1882 switch (ar_pci->pdev->device) { 1883 case QCA988X_2_0_DEVICE_ID: 1884 case QCA99X0_2_0_DEVICE_ID: 1885 return 1; 1886 case QCA6164_2_1_DEVICE_ID: 1887 case QCA6174_2_1_DEVICE_ID: 1888 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { 1889 case QCA6174_HW_1_0_CHIP_ID_REV: 1890 case QCA6174_HW_1_1_CHIP_ID_REV: 1891 case QCA6174_HW_2_1_CHIP_ID_REV: 1892 case QCA6174_HW_2_2_CHIP_ID_REV: 1893 return 3; 1894 case QCA6174_HW_1_3_CHIP_ID_REV: 1895 return 2; 1896 case QCA6174_HW_3_0_CHIP_ID_REV: 1897 case QCA6174_HW_3_1_CHIP_ID_REV: 1898 case QCA6174_HW_3_2_CHIP_ID_REV: 1899 return 9; 1900 } 1901 break; 1902 case QCA9377_1_0_DEVICE_ID: 1903 return 2; 1904 } 1905 1906 ath10k_warn(ar, "unknown number of banks, assuming 1\n"); 1907 return 1; 1908 } 1909 1910 static int ath10k_pci_init_config(struct ath10k *ar) 1911 { 1912 u32 interconnect_targ_addr; 1913 u32 pcie_state_targ_addr = 0; 1914 u32 pipe_cfg_targ_addr = 0; 1915 u32 svc_to_pipe_map = 0; 1916 u32 pcie_config_flags = 0; 1917 u32 ealloc_value; 1918 u32 ealloc_targ_addr; 1919 u32 flag2_value; 1920 u32 flag2_targ_addr; 1921 int ret = 0; 1922 1923 /* Download to Target the CE Config and the service-to-CE map */ 1924 interconnect_targ_addr = 1925 host_interest_item_address(HI_ITEM(hi_interconnect_state)); 1926 1927 /* Supply Target-side CE configuration */ 1928 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, 1929 &pcie_state_targ_addr); 1930 if (ret != 0) { 1931 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); 1932 return ret; 1933 } 1934 1935 if (pcie_state_targ_addr == 0) { 1936 ret = -EIO; 1937 ath10k_err(ar, "Invalid pcie state addr\n"); 1938 return ret; 1939 } 1940 1941 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 1942 offsetof(struct pcie_state, 1943 pipe_cfg_addr)), 1944 &pipe_cfg_targ_addr); 1945 if (ret != 0) { 1946 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); 1947 return ret; 1948 } 1949 1950 if (pipe_cfg_targ_addr == 0) { 1951 ret = -EIO; 1952 ath10k_err(ar, "Invalid pipe cfg addr\n"); 1953 return ret; 1954 } 1955 1956 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, 1957 target_ce_config_wlan, 1958 sizeof(struct ce_pipe_config) * 1959 NUM_TARGET_CE_CONFIG_WLAN); 1960 1961 if (ret != 0) { 1962 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); 1963 return ret; 1964 } 1965 1966 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 1967 offsetof(struct pcie_state, 1968 svc_to_pipe_map)), 1969 &svc_to_pipe_map); 1970 if (ret != 0) { 1971 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); 1972 return ret; 1973 } 1974 1975 if (svc_to_pipe_map == 0) { 1976 ret = -EIO; 1977 ath10k_err(ar, "Invalid svc_to_pipe map\n"); 1978 return ret; 1979 } 1980 1981 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, 1982 target_service_to_ce_map_wlan, 1983 sizeof(target_service_to_ce_map_wlan)); 1984 if (ret != 0) { 1985 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); 1986 return ret; 1987 } 1988 1989 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 1990 offsetof(struct pcie_state, 1991 config_flags)), 1992 &pcie_config_flags); 1993 if (ret != 0) { 1994 
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for another feature.
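	 *
	 * Concretely (see the code below): host CE5 stops posting receive
	 * buffers (src_sz_max = 0, dest_nentries = 0), target CE5 is turned
	 * into a host->target pipe with a 2048 byte limit, and the inbound
	 * HTT_DATA_MSG service entry is remapped from CE5 to CE1.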
2058 */ 2059 2060 /* Override Host's Copy Engine 5 configuration */ 2061 attr = &host_ce_config_wlan[5]; 2062 attr->src_sz_max = 0; 2063 attr->dest_nentries = 0; 2064 2065 /* Override Target firmware's Copy Engine configuration */ 2066 config = &target_ce_config_wlan[5]; 2067 config->pipedir = __cpu_to_le32(PIPEDIR_OUT); 2068 config->nbytes_max = __cpu_to_le32(2048); 2069 2070 /* Map from service/endpoint to Copy Engine */ 2071 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); 2072 } 2073 2074 static int ath10k_pci_alloc_pipes(struct ath10k *ar) 2075 { 2076 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2077 struct ath10k_pci_pipe *pipe; 2078 int i, ret; 2079 2080 for (i = 0; i < CE_COUNT; i++) { 2081 pipe = &ar_pci->pipe_info[i]; 2082 pipe->ce_hdl = &ar_pci->ce_states[i]; 2083 pipe->pipe_num = i; 2084 pipe->hif_ce_state = ar; 2085 2086 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); 2087 if (ret) { 2088 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", 2089 i, ret); 2090 return ret; 2091 } 2092 2093 /* Last CE is Diagnostic Window */ 2094 if (i == CE_DIAG_PIPE) { 2095 ar_pci->ce_diag = pipe->ce_hdl; 2096 continue; 2097 } 2098 2099 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); 2100 } 2101 2102 return 0; 2103 } 2104 2105 static void ath10k_pci_free_pipes(struct ath10k *ar) 2106 { 2107 int i; 2108 2109 for (i = 0; i < CE_COUNT; i++) 2110 ath10k_ce_free_pipe(ar, i); 2111 } 2112 2113 static int ath10k_pci_init_pipes(struct ath10k *ar) 2114 { 2115 int i, ret; 2116 2117 for (i = 0; i < CE_COUNT; i++) { 2118 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); 2119 if (ret) { 2120 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", 2121 i, ret); 2122 return ret; 2123 } 2124 } 2125 2126 return 0; 2127 } 2128 2129 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) 2130 { 2131 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & 2132 FW_IND_EVENT_PENDING; 2133 } 2134 2135 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) 2136 { 2137 u32 val; 2138 2139 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2140 val &= ~FW_IND_EVENT_PENDING; 2141 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); 2142 } 2143 2144 /* this function effectively clears target memory controller assert line */ 2145 static void ath10k_pci_warm_reset_si0(struct ath10k *ar) 2146 { 2147 u32 val; 2148 2149 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2150 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2151 val | SOC_RESET_CONTROL_SI0_RST_MASK); 2152 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2153 2154 msleep(10); 2155 2156 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2157 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2158 val & ~SOC_RESET_CONTROL_SI0_RST_MASK); 2159 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2160 2161 msleep(10); 2162 } 2163 2164 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) 2165 { 2166 u32 val; 2167 2168 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); 2169 2170 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2171 SOC_RESET_CONTROL_ADDRESS); 2172 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2173 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); 2174 } 2175 2176 static void ath10k_pci_warm_reset_ce(struct ath10k *ar) 2177 { 2178 u32 val; 2179 2180 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2181 SOC_RESET_CONTROL_ADDRESS); 2182 2183 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2184 
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}

static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse pci-e controller to
	 * the point of bringing host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
		return ath10k_pci_warm_reset(ar);
	} else if (QCA_REV_99X0(ar)) {
		ath10k_pci_irq_disable(ar);
		return ath10k_pci_qca99x0_chip_reset(ar);
	} else {
		return -ENOTSUPP;
	}
}

static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
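		 * (That is what the ath10k_pci_diag_read32() of
		 * QCA988X_HOST_INTEREST_ADDRESS just below is doing.)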
2282 */ 2283 ret = ath10k_pci_init_pipes(ar); 2284 if (ret) { 2285 ath10k_warn(ar, "failed to init copy engine: %d\n", 2286 ret); 2287 continue; 2288 } 2289 2290 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, 2291 &val); 2292 if (ret) { 2293 ath10k_warn(ar, "failed to poke copy engine: %d\n", 2294 ret); 2295 continue; 2296 } 2297 2298 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n"); 2299 return 0; 2300 } 2301 2302 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) { 2303 ath10k_warn(ar, "refusing cold reset as requested\n"); 2304 return -EPERM; 2305 } 2306 2307 ret = ath10k_pci_cold_reset(ar); 2308 if (ret) { 2309 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2310 return ret; 2311 } 2312 2313 ret = ath10k_pci_wait_for_target_init(ar); 2314 if (ret) { 2315 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2316 ret); 2317 return ret; 2318 } 2319 2320 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n"); 2321 2322 return 0; 2323 } 2324 2325 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) 2326 { 2327 int ret; 2328 2329 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n"); 2330 2331 /* FIXME: QCA6174 requires cold + warm reset to work. */ 2332 2333 ret = ath10k_pci_cold_reset(ar); 2334 if (ret) { 2335 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2336 return ret; 2337 } 2338 2339 ret = ath10k_pci_wait_for_target_init(ar); 2340 if (ret) { 2341 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2342 ret); 2343 return ret; 2344 } 2345 2346 ret = ath10k_pci_warm_reset(ar); 2347 if (ret) { 2348 ath10k_warn(ar, "failed to warm reset: %d\n", ret); 2349 return ret; 2350 } 2351 2352 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); 2353 2354 return 0; 2355 } 2356 2357 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) 2358 { 2359 int ret; 2360 2361 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n"); 2362 2363 ret = ath10k_pci_cold_reset(ar); 2364 if (ret) { 2365 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2366 return ret; 2367 } 2368 2369 ret = ath10k_pci_wait_for_target_init(ar); 2370 if (ret) { 2371 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2372 ret); 2373 return ret; 2374 } 2375 2376 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n"); 2377 2378 return 0; 2379 } 2380 2381 static int ath10k_pci_chip_reset(struct ath10k *ar) 2382 { 2383 if (QCA_REV_988X(ar)) 2384 return ath10k_pci_qca988x_chip_reset(ar); 2385 else if (QCA_REV_6174(ar)) 2386 return ath10k_pci_qca6174_chip_reset(ar); 2387 else if (QCA_REV_9377(ar)) 2388 return ath10k_pci_qca6174_chip_reset(ar); 2389 else if (QCA_REV_99X0(ar)) 2390 return ath10k_pci_qca99x0_chip_reset(ar); 2391 else 2392 return -ENOTSUPP; 2393 } 2394 2395 static int ath10k_pci_hif_power_up(struct ath10k *ar) 2396 { 2397 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2398 int ret; 2399 2400 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); 2401 2402 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2403 &ar_pci->link_ctl); 2404 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2405 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); 2406 2407 /* 2408 * Bring the target up cleanly. 2409 * 2410 * The target may be in an undefined state with an AUX-powered Target 2411 * and a Host in WoW mode. 
If the Host crashes, loses power, or is 2412 * restarted (without unloading the driver) then the Target is left 2413 * (aux) powered and running. On a subsequent driver load, the Target 2414 * is in an unexpected state. We try to catch that here in order to 2415 * reset the Target and retry the probe. 2416 */ 2417 ret = ath10k_pci_chip_reset(ar); 2418 if (ret) { 2419 if (ath10k_pci_has_fw_crashed(ar)) { 2420 ath10k_warn(ar, "firmware crashed during chip reset\n"); 2421 ath10k_pci_fw_crashed_clear(ar); 2422 ath10k_pci_fw_crashed_dump(ar); 2423 } 2424 2425 ath10k_err(ar, "failed to reset chip: %d\n", ret); 2426 goto err_sleep; 2427 } 2428 2429 ret = ath10k_pci_init_pipes(ar); 2430 if (ret) { 2431 ath10k_err(ar, "failed to initialize CE: %d\n", ret); 2432 goto err_sleep; 2433 } 2434 2435 ret = ath10k_pci_init_config(ar); 2436 if (ret) { 2437 ath10k_err(ar, "failed to setup init config: %d\n", ret); 2438 goto err_ce; 2439 } 2440 2441 ret = ath10k_pci_wake_target_cpu(ar); 2442 if (ret) { 2443 ath10k_err(ar, "could not wake up target CPU: %d\n", ret); 2444 goto err_ce; 2445 } 2446 2447 return 0; 2448 2449 err_ce: 2450 ath10k_pci_ce_deinit(ar); 2451 2452 err_sleep: 2453 return ret; 2454 } 2455 2456 static void ath10k_pci_hif_power_down(struct ath10k *ar) 2457 { 2458 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); 2459 2460 /* Currently hif_power_up performs effectively a reset and hif_stop 2461 * resets the chip as well so there's no point in resetting here. 2462 */ 2463 } 2464 2465 #ifdef CONFIG_PM 2466 2467 static int ath10k_pci_hif_suspend(struct ath10k *ar) 2468 { 2469 /* The grace timer can still be counting down and ar->ps_awake be true. 2470 * It is known that the device may be asleep after resuming regardless 2471 * of the SoC powersave state before suspending. Hence make sure the 2472 * device is asleep before proceeding. 2473 */ 2474 ath10k_pci_sleep_sync(ar); 2475 2476 return 0; 2477 } 2478 2479 static int ath10k_pci_hif_resume(struct ath10k *ar) 2480 { 2481 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2482 struct pci_dev *pdev = ar_pci->pdev; 2483 u32 val; 2484 int ret = 0; 2485 2486 ret = ath10k_pci_force_wake(ar); 2487 if (ret) { 2488 ath10k_err(ar, "failed to wake up target: %d\n", ret); 2489 return ret; 2490 } 2491 2492 /* Suspend/Resume resets the PCI configuration space, so we have to 2493 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries 2494 * from interfering with C3 CPU state. pci_restore_state won't help 2495 * here since it only restores the first 64 bytes pci config header. 
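	 * (RETRY_TIMEOUT lives at config offset 0x41, i.e. bits 15:8 of the
	 * dword read at offset 0x40 below, which is the field masked off
	 * before writing the value back.)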
2496 */ 2497 pci_read_config_dword(pdev, 0x40, &val); 2498 if ((val & 0x0000ff00) != 0) 2499 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 2500 2501 return ret; 2502 } 2503 #endif 2504 2505 static const struct ath10k_hif_ops ath10k_pci_hif_ops = { 2506 .tx_sg = ath10k_pci_hif_tx_sg, 2507 .diag_read = ath10k_pci_hif_diag_read, 2508 .diag_write = ath10k_pci_diag_write_mem, 2509 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, 2510 .start = ath10k_pci_hif_start, 2511 .stop = ath10k_pci_hif_stop, 2512 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, 2513 .get_default_pipe = ath10k_pci_hif_get_default_pipe, 2514 .send_complete_check = ath10k_pci_hif_send_complete_check, 2515 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, 2516 .power_up = ath10k_pci_hif_power_up, 2517 .power_down = ath10k_pci_hif_power_down, 2518 .read32 = ath10k_pci_read32, 2519 .write32 = ath10k_pci_write32, 2520 #ifdef CONFIG_PM 2521 .suspend = ath10k_pci_hif_suspend, 2522 .resume = ath10k_pci_hif_resume, 2523 #endif 2524 }; 2525 2526 static void ath10k_pci_ce_tasklet(unsigned long ptr) 2527 { 2528 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; 2529 struct ath10k_pci *ar_pci = pipe->ar_pci; 2530 2531 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); 2532 } 2533 2534 static void ath10k_msi_err_tasklet(unsigned long data) 2535 { 2536 struct ath10k *ar = (struct ath10k *)data; 2537 2538 if (!ath10k_pci_has_fw_crashed(ar)) { 2539 ath10k_warn(ar, "received unsolicited fw crash interrupt\n"); 2540 return; 2541 } 2542 2543 ath10k_pci_irq_disable(ar); 2544 ath10k_pci_fw_crashed_clear(ar); 2545 ath10k_pci_fw_crashed_dump(ar); 2546 } 2547 2548 /* 2549 * Handler for a per-engine interrupt on a PARTICULAR CE. 2550 * This is used in cases where each CE has a private MSI interrupt. 2551 */ 2552 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) 2553 { 2554 struct ath10k *ar = arg; 2555 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2556 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; 2557 2558 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { 2559 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, 2560 ce_id); 2561 return IRQ_HANDLED; 2562 } 2563 2564 /* 2565 * NOTE: We are able to derive ce_id from irq because we 2566 * use a one-to-one mapping for CE's 0..5. 2567 * CE's 6 & 7 do not use interrupts at all. 2568 * 2569 * This mapping must be kept in sync with the mapping 2570 * used by firmware. 2571 */ 2572 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); 2573 return IRQ_HANDLED; 2574 } 2575 2576 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) 2577 { 2578 struct ath10k *ar = arg; 2579 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2580 2581 tasklet_schedule(&ar_pci->msi_fw_err); 2582 return IRQ_HANDLED; 2583 } 2584 2585 /* 2586 * Top-level interrupt handler for all PCI interrupts from a Target. 2587 * When a block of MSI interrupts is allocated, this top-level handler 2588 * is not used; instead, we directly call the correct sub-handler. 
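 * (This handler is the one registered by ath10k_pci_request_irq_legacy()
 * and ath10k_pci_request_irq_msi(), i.e. it covers the legacy INTx and
 * single-MSI cases.)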
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_irq_disable(ar);
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}

static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	default:
		return ath10k_pci_request_irq_msix(ar);
	}
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR, MSI or MSI-X */
	for (i = 0; i < max(1,
ar_pci->num_msi_intrs); i++) 2722 free_irq(ar_pci->pdev->irq + i, ar); 2723 } 2724 2725 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) 2726 { 2727 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2728 int i; 2729 2730 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); 2731 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, 2732 (unsigned long)ar); 2733 2734 for (i = 0; i < CE_COUNT; i++) { 2735 ar_pci->pipe_info[i].ar_pci = ar_pci; 2736 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet, 2737 (unsigned long)&ar_pci->pipe_info[i]); 2738 } 2739 } 2740 2741 static int ath10k_pci_init_irq(struct ath10k *ar) 2742 { 2743 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2744 int ret; 2745 2746 ath10k_pci_init_irq_tasklets(ar); 2747 2748 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) 2749 ath10k_info(ar, "limiting irq mode to: %d\n", 2750 ath10k_pci_irq_mode); 2751 2752 /* Try MSI-X */ 2753 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) { 2754 ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1; 2755 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, 2756 ar_pci->num_msi_intrs); 2757 if (ret > 0) 2758 return 0; 2759 2760 /* fall-through */ 2761 } 2762 2763 /* Try MSI */ 2764 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { 2765 ar_pci->num_msi_intrs = 1; 2766 ret = pci_enable_msi(ar_pci->pdev); 2767 if (ret == 0) 2768 return 0; 2769 2770 /* fall-through */ 2771 } 2772 2773 /* Try legacy irq 2774 * 2775 * A potential race occurs here: The CORE_BASE write 2776 * depends on target correctly decoding AXI address but 2777 * host won't know when target writes BAR to CORE_CTRL. 2778 * This write might get lost if target has NOT written BAR. 2779 * For now, fix the race by repeating the write in below 2780 * synchronization checking. 
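	 * (The repeated write happens in ath10k_pci_wait_for_target_init(),
	 * which re-enables the legacy interrupt on every iteration of its
	 * polling loop.)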
*/ 2781 ar_pci->num_msi_intrs = 0; 2782 2783 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 2784 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 2785 2786 return 0; 2787 } 2788 2789 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) 2790 { 2791 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 2792 0); 2793 } 2794 2795 static int ath10k_pci_deinit_irq(struct ath10k *ar) 2796 { 2797 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2798 2799 switch (ar_pci->num_msi_intrs) { 2800 case 0: 2801 ath10k_pci_deinit_irq_legacy(ar); 2802 break; 2803 default: 2804 pci_disable_msi(ar_pci->pdev); 2805 break; 2806 } 2807 2808 return 0; 2809 } 2810 2811 static int ath10k_pci_wait_for_target_init(struct ath10k *ar) 2812 { 2813 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2814 unsigned long timeout; 2815 u32 val; 2816 2817 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); 2818 2819 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); 2820 2821 do { 2822 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2823 2824 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", 2825 val); 2826 2827 /* target should never return this */ 2828 if (val == 0xffffffff) 2829 continue; 2830 2831 /* the device has crashed so don't bother trying anymore */ 2832 if (val & FW_IND_EVENT_PENDING) 2833 break; 2834 2835 if (val & FW_IND_INITIALIZED) 2836 break; 2837 2838 if (ar_pci->num_msi_intrs == 0) 2839 /* Fix potential race by repeating CORE_BASE writes */ 2840 ath10k_pci_enable_legacy_irq(ar); 2841 2842 mdelay(10); 2843 } while (time_before(jiffies, timeout)); 2844 2845 ath10k_pci_disable_and_clear_legacy_irq(ar); 2846 ath10k_pci_irq_msi_fw_mask(ar); 2847 2848 if (val == 0xffffffff) { 2849 ath10k_err(ar, "failed to read device register, device is gone\n"); 2850 return -EIO; 2851 } 2852 2853 if (val & FW_IND_EVENT_PENDING) { 2854 ath10k_warn(ar, "device has crashed during init\n"); 2855 return -ECOMM; 2856 } 2857 2858 if (!(val & FW_IND_INITIALIZED)) { 2859 ath10k_err(ar, "failed to receive initialized event from target: %08x\n", 2860 val); 2861 return -ETIMEDOUT; 2862 } 2863 2864 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); 2865 return 0; 2866 } 2867 2868 static int ath10k_pci_cold_reset(struct ath10k *ar) 2869 { 2870 u32 val; 2871 2872 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); 2873 2874 spin_lock_bh(&ar->data_lock); 2875 2876 ar->stats.fw_cold_reset_counter++; 2877 2878 spin_unlock_bh(&ar->data_lock); 2879 2880 /* Put Target, including PCIe, into RESET. */ 2881 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); 2882 val |= 1; 2883 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 2884 2885 /* After writing into SOC_GLOBAL_RESET to put device into 2886 * reset and pulling out of reset pcie may not be stable 2887 * for any immediate pcie register access and cause bus error, 2888 * add delay before any pcie access request to fix this issue. 2889 */ 2890 msleep(20); 2891 2892 /* Pull Target, including PCIe, out of RESET. 
*/ 2893 val &= ~1; 2894 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 2895 2896 msleep(20); 2897 2898 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); 2899 2900 return 0; 2901 } 2902 2903 static int ath10k_pci_claim(struct ath10k *ar) 2904 { 2905 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2906 struct pci_dev *pdev = ar_pci->pdev; 2907 int ret; 2908 2909 pci_set_drvdata(pdev, ar); 2910 2911 ret = pci_enable_device(pdev); 2912 if (ret) { 2913 ath10k_err(ar, "failed to enable pci device: %d\n", ret); 2914 return ret; 2915 } 2916 2917 ret = pci_request_region(pdev, BAR_NUM, "ath"); 2918 if (ret) { 2919 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, 2920 ret); 2921 goto err_device; 2922 } 2923 2924 /* Target expects 32 bit DMA. Enforce it. */ 2925 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2926 if (ret) { 2927 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); 2928 goto err_region; 2929 } 2930 2931 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2932 if (ret) { 2933 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", 2934 ret); 2935 goto err_region; 2936 } 2937 2938 pci_set_master(pdev); 2939 2940 /* Arrange for access to Target SoC registers. */ 2941 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); 2942 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); 2943 if (!ar_pci->mem) { 2944 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); 2945 ret = -EIO; 2946 goto err_master; 2947 } 2948 2949 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); 2950 return 0; 2951 2952 err_master: 2953 pci_clear_master(pdev); 2954 2955 err_region: 2956 pci_release_region(pdev, BAR_NUM); 2957 2958 err_device: 2959 pci_disable_device(pdev); 2960 2961 return ret; 2962 } 2963 2964 static void ath10k_pci_release(struct ath10k *ar) 2965 { 2966 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2967 struct pci_dev *pdev = ar_pci->pdev; 2968 2969 pci_iounmap(pdev, ar_pci->mem); 2970 pci_release_region(pdev, BAR_NUM); 2971 pci_clear_master(pdev); 2972 pci_disable_device(pdev); 2973 } 2974 2975 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) 2976 { 2977 const struct ath10k_pci_supp_chip *supp_chip; 2978 int i; 2979 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); 2980 2981 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { 2982 supp_chip = &ath10k_pci_supp_chips[i]; 2983 2984 if (supp_chip->dev_id == dev_id && 2985 supp_chip->rev_id == rev_id) 2986 return true; 2987 } 2988 2989 return false; 2990 } 2991 2992 static int ath10k_pci_probe(struct pci_dev *pdev, 2993 const struct pci_device_id *pci_dev) 2994 { 2995 int ret = 0; 2996 struct ath10k *ar; 2997 struct ath10k_pci *ar_pci; 2998 enum ath10k_hw_rev hw_rev; 2999 u32 chip_id; 3000 bool pci_ps; 3001 3002 switch (pci_dev->device) { 3003 case QCA988X_2_0_DEVICE_ID: 3004 hw_rev = ATH10K_HW_QCA988X; 3005 pci_ps = false; 3006 break; 3007 case QCA6164_2_1_DEVICE_ID: 3008 case QCA6174_2_1_DEVICE_ID: 3009 hw_rev = ATH10K_HW_QCA6174; 3010 pci_ps = true; 3011 break; 3012 case QCA99X0_2_0_DEVICE_ID: 3013 hw_rev = ATH10K_HW_QCA99X0; 3014 pci_ps = false; 3015 break; 3016 case QCA9377_1_0_DEVICE_ID: 3017 hw_rev = ATH10K_HW_QCA9377; 3018 pci_ps = true; 3019 break; 3020 default: 3021 WARN_ON(1); 3022 return -ENOTSUPP; 3023 } 3024 3025 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, 3026 hw_rev, &ath10k_pci_hif_ops); 3027 if (!ar) { 3028 dev_err(&pdev->dev, "failed to allocate core\n"); 3029 return -ENOMEM; 3030 } 3031 3032 ath10k_dbg(ar, 
ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", 3033 pdev->vendor, pdev->device, 3034 pdev->subsystem_vendor, pdev->subsystem_device); 3035 3036 ar_pci = ath10k_pci_priv(ar); 3037 ar_pci->pdev = pdev; 3038 ar_pci->dev = &pdev->dev; 3039 ar_pci->ar = ar; 3040 ar->dev_id = pci_dev->device; 3041 ar_pci->pci_ps = pci_ps; 3042 3043 ar->id.vendor = pdev->vendor; 3044 ar->id.device = pdev->device; 3045 ar->id.subsystem_vendor = pdev->subsystem_vendor; 3046 ar->id.subsystem_device = pdev->subsystem_device; 3047 3048 spin_lock_init(&ar_pci->ce_lock); 3049 spin_lock_init(&ar_pci->ps_lock); 3050 3051 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 3052 (unsigned long)ar); 3053 setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, 3054 (unsigned long)ar); 3055 3056 ret = ath10k_pci_claim(ar); 3057 if (ret) { 3058 ath10k_err(ar, "failed to claim device: %d\n", ret); 3059 goto err_core_destroy; 3060 } 3061 3062 if (QCA_REV_6174(ar)) 3063 ath10k_pci_override_ce_config(ar); 3064 3065 ret = ath10k_pci_alloc_pipes(ar); 3066 if (ret) { 3067 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3068 ret); 3069 goto err_sleep; 3070 } 3071 3072 ret = ath10k_pci_force_wake(ar); 3073 if (ret) { 3074 ath10k_warn(ar, "failed to wake up device : %d\n", ret); 3075 goto err_free_pipes; 3076 } 3077 3078 ath10k_pci_ce_deinit(ar); 3079 ath10k_pci_irq_disable(ar); 3080 3081 ret = ath10k_pci_init_irq(ar); 3082 if (ret) { 3083 ath10k_err(ar, "failed to init irqs: %d\n", ret); 3084 goto err_free_pipes; 3085 } 3086 3087 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", 3088 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, 3089 ath10k_pci_irq_mode, ath10k_pci_reset_mode); 3090 3091 ret = ath10k_pci_request_irq(ar); 3092 if (ret) { 3093 ath10k_warn(ar, "failed to request irqs: %d\n", ret); 3094 goto err_deinit_irq; 3095 } 3096 3097 ret = ath10k_pci_chip_reset(ar); 3098 if (ret) { 3099 ath10k_err(ar, "failed to reset chip: %d\n", ret); 3100 goto err_free_irq; 3101 } 3102 3103 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); 3104 if (chip_id == 0xffffffff) { 3105 ath10k_err(ar, "failed to get chip id\n"); 3106 goto err_free_irq; 3107 } 3108 3109 if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { 3110 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", 3111 pdev->device, chip_id); 3112 goto err_free_irq; 3113 } 3114 3115 ret = ath10k_core_register(ar, chip_id); 3116 if (ret) { 3117 ath10k_err(ar, "failed to register driver core: %d\n", ret); 3118 goto err_free_irq; 3119 } 3120 3121 return 0; 3122 3123 err_free_irq: 3124 ath10k_pci_free_irq(ar); 3125 ath10k_pci_kill_tasklet(ar); 3126 3127 err_deinit_irq: 3128 ath10k_pci_deinit_irq(ar); 3129 3130 err_free_pipes: 3131 ath10k_pci_free_pipes(ar); 3132 3133 err_sleep: 3134 ath10k_pci_sleep_sync(ar); 3135 ath10k_pci_release(ar); 3136 3137 err_core_destroy: 3138 ath10k_core_destroy(ar); 3139 3140 return ret; 3141 } 3142 3143 static void ath10k_pci_remove(struct pci_dev *pdev) 3144 { 3145 struct ath10k *ar = pci_get_drvdata(pdev); 3146 struct ath10k_pci *ar_pci; 3147 3148 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); 3149 3150 if (!ar) 3151 return; 3152 3153 ar_pci = ath10k_pci_priv(ar); 3154 3155 if (!ar_pci) 3156 return; 3157 3158 ath10k_core_unregister(ar); 3159 ath10k_pci_free_irq(ar); 3160 ath10k_pci_kill_tasklet(ar); 3161 ath10k_pci_deinit_irq(ar); 3162 ath10k_pci_ce_deinit(ar); 3163 ath10k_pci_free_pipes(ar); 3164 ath10k_pci_sleep_sync(ar); 3165 ath10k_pci_release(ar); 3166 
ath10k_core_destroy(ar); 3167 } 3168 3169 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); 3170 3171 static struct pci_driver ath10k_pci_driver = { 3172 .name = "ath10k_pci", 3173 .id_table = ath10k_pci_id_table, 3174 .probe = ath10k_pci_probe, 3175 .remove = ath10k_pci_remove, 3176 }; 3177 3178 static int __init ath10k_pci_init(void) 3179 { 3180 int ret; 3181 3182 ret = pci_register_driver(&ath10k_pci_driver); 3183 if (ret) 3184 printk(KERN_ERR "failed to register ath10k pci driver: %d\n", 3185 ret); 3186 3187 return ret; 3188 } 3189 module_init(ath10k_pci_init); 3190 3191 static void __exit ath10k_pci_exit(void) 3192 { 3193 pci_unregister_driver(&ath10k_pci_driver); 3194 } 3195 3196 module_exit(ath10k_pci_exit); 3197 3198 MODULE_AUTHOR("Qualcomm Atheros"); 3199 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); 3200 MODULE_LICENSE("Dual BSD/GPL"); 3201 3202 /* QCA988x 2.0 firmware files */ 3203 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); 3204 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); 3205 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); 3206 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3207 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3208 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); 3209 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3210 3211 /* QCA6174 2.1 firmware files */ 3212 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); 3213 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); 3214 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE); 3215 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3216 3217 /* QCA6174 3.1 firmware files */ 3218 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3219 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3220 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); 3221 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3222 3223 /* QCA9377 1.0 firmware files */ 3224 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3225 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE); 3226