/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
73 */ 74 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, 75 76 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 77 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 78 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 79 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 80 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 81 82 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 83 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 84 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 85 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 87 88 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 89 90 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 91 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, 92 }; 93 94 static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 95 static int ath10k_pci_cold_reset(struct ath10k *ar); 96 static int ath10k_pci_safe_chip_reset(struct ath10k *ar); 97 static int ath10k_pci_wait_for_target_init(struct ath10k *ar); 98 static int ath10k_pci_init_irq(struct ath10k *ar); 99 static int ath10k_pci_deinit_irq(struct ath10k *ar); 100 static int ath10k_pci_request_irq(struct ath10k *ar); 101 static void ath10k_pci_free_irq(struct ath10k *ar); 102 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 103 struct ath10k_ce_pipe *rx_pipe, 104 struct bmi_xfer *xfer); 105 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); 106 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); 107 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 108 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); 109 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); 110 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 111 112 static struct ce_attr host_ce_config_wlan[] = { 113 /* CE0: host->target HTC control and raw streams */ 114 { 115 .flags = CE_ATTR_FLAGS, 116 .src_nentries = 16, 117 .src_sz_max = 256, 118 .dest_nentries = 0, 119 .send_cb = ath10k_pci_htc_tx_cb, 120 }, 121 122 /* CE1: target->host HTT + HTC control */ 123 { 124 .flags = CE_ATTR_FLAGS, 125 .src_nentries = 0, 126 .src_sz_max = 2048, 127 .dest_nentries = 512, 128 .recv_cb = ath10k_pci_htt_htc_rx_cb, 129 }, 130 131 /* CE2: target->host WMI */ 132 { 133 .flags = CE_ATTR_FLAGS, 134 .src_nentries = 0, 135 .src_sz_max = 2048, 136 .dest_nentries = 128, 137 .recv_cb = ath10k_pci_htc_rx_cb, 138 }, 139 140 /* CE3: host->target WMI */ 141 { 142 .flags = CE_ATTR_FLAGS, 143 .src_nentries = 32, 144 .src_sz_max = 2048, 145 .dest_nentries = 0, 146 .send_cb = ath10k_pci_htc_tx_cb, 147 }, 148 149 /* CE4: host->target HTT */ 150 { 151 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 152 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, 153 .src_sz_max = 256, 154 .dest_nentries = 0, 155 .send_cb = ath10k_pci_htt_tx_cb, 156 }, 157 158 /* CE5: target->host HTT (HIF->HTT) */ 159 { 160 .flags = CE_ATTR_FLAGS, 161 .src_nentries = 0, 162 .src_sz_max = 512, 163 .dest_nentries = 512, 164 .recv_cb = ath10k_pci_htt_rx_cb, 165 }, 166 167 /* CE6: target autonomous hif_memcpy */ 168 { 169 .flags = CE_ATTR_FLAGS, 170 .src_nentries = 0, 171 .src_sz_max = 0, 172 .dest_nentries = 0, 173 }, 174 175 /* CE7: ce_diag, the Diagnostic Window */ 176 { 177 .flags = CE_ATTR_FLAGS, 178 .src_nentries = 2, 179 .src_sz_max = DIAG_TRANSFER_LIMIT, 180 .dest_nentries = 2, 181 }, 182 183 /* CE8: target->host pktlog */ 184 { 185 .flags = CE_ATTR_FLAGS, 186 .src_nentries = 
0, 187 .src_sz_max = 2048, 188 .dest_nentries = 128, 189 }, 190 191 /* CE9 target autonomous qcache memcpy */ 192 { 193 .flags = CE_ATTR_FLAGS, 194 .src_nentries = 0, 195 .src_sz_max = 0, 196 .dest_nentries = 0, 197 }, 198 199 /* CE10: target autonomous hif memcpy */ 200 { 201 .flags = CE_ATTR_FLAGS, 202 .src_nentries = 0, 203 .src_sz_max = 0, 204 .dest_nentries = 0, 205 }, 206 207 /* CE11: target autonomous hif memcpy */ 208 { 209 .flags = CE_ATTR_FLAGS, 210 .src_nentries = 0, 211 .src_sz_max = 0, 212 .dest_nentries = 0, 213 }, 214 }; 215 216 /* Target firmware's Copy Engine configuration. */ 217 static struct ce_pipe_config target_ce_config_wlan[] = { 218 /* CE0: host->target HTC control and raw streams */ 219 { 220 .pipenum = __cpu_to_le32(0), 221 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 222 .nentries = __cpu_to_le32(32), 223 .nbytes_max = __cpu_to_le32(256), 224 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 225 .reserved = __cpu_to_le32(0), 226 }, 227 228 /* CE1: target->host HTT + HTC control */ 229 { 230 .pipenum = __cpu_to_le32(1), 231 .pipedir = __cpu_to_le32(PIPEDIR_IN), 232 .nentries = __cpu_to_le32(32), 233 .nbytes_max = __cpu_to_le32(2048), 234 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 235 .reserved = __cpu_to_le32(0), 236 }, 237 238 /* CE2: target->host WMI */ 239 { 240 .pipenum = __cpu_to_le32(2), 241 .pipedir = __cpu_to_le32(PIPEDIR_IN), 242 .nentries = __cpu_to_le32(64), 243 .nbytes_max = __cpu_to_le32(2048), 244 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 245 .reserved = __cpu_to_le32(0), 246 }, 247 248 /* CE3: host->target WMI */ 249 { 250 .pipenum = __cpu_to_le32(3), 251 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 252 .nentries = __cpu_to_le32(32), 253 .nbytes_max = __cpu_to_le32(2048), 254 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 255 .reserved = __cpu_to_le32(0), 256 }, 257 258 /* CE4: host->target HTT */ 259 { 260 .pipenum = __cpu_to_le32(4), 261 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 262 .nentries = __cpu_to_le32(256), 263 .nbytes_max = __cpu_to_le32(256), 264 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 265 .reserved = __cpu_to_le32(0), 266 }, 267 268 /* NB: 50% of src nentries, since tx has 2 frags */ 269 270 /* CE5: target->host HTT (HIF->HTT) */ 271 { 272 .pipenum = __cpu_to_le32(5), 273 .pipedir = __cpu_to_le32(PIPEDIR_IN), 274 .nentries = __cpu_to_le32(32), 275 .nbytes_max = __cpu_to_le32(512), 276 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 277 .reserved = __cpu_to_le32(0), 278 }, 279 280 /* CE6: Reserved for target autonomous hif_memcpy */ 281 { 282 .pipenum = __cpu_to_le32(6), 283 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 284 .nentries = __cpu_to_le32(32), 285 .nbytes_max = __cpu_to_le32(4096), 286 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 287 .reserved = __cpu_to_le32(0), 288 }, 289 290 /* CE7 used only by Host */ 291 { 292 .pipenum = __cpu_to_le32(7), 293 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 294 .nentries = __cpu_to_le32(0), 295 .nbytes_max = __cpu_to_le32(0), 296 .flags = __cpu_to_le32(0), 297 .reserved = __cpu_to_le32(0), 298 }, 299 300 /* CE8 target->host packtlog */ 301 { 302 .pipenum = __cpu_to_le32(8), 303 .pipedir = __cpu_to_le32(PIPEDIR_IN), 304 .nentries = __cpu_to_le32(64), 305 .nbytes_max = __cpu_to_le32(2048), 306 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 307 .reserved = __cpu_to_le32(0), 308 }, 309 310 /* CE9 target autonomous qcache memcpy */ 311 { 312 .pipenum = __cpu_to_le32(9), 313 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 314 .nentries = __cpu_to_le32(32), 315 .nbytes_max = __cpu_to_le32(2048), 316 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 317 
.reserved = __cpu_to_le32(0), 318 }, 319 320 /* It not necessary to send target wlan configuration for CE10 & CE11 321 * as these CEs are not actively used in target. 322 */ 323 }; 324 325 /* 326 * Map from service/endpoint to Copy Engine. 327 * This table is derived from the CE_PCI TABLE, above. 328 * It is passed to the Target at startup for use by firmware. 329 */ 330 static struct service_to_pipe target_service_to_ce_map_wlan[] = { 331 { 332 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 333 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 334 __cpu_to_le32(3), 335 }, 336 { 337 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 338 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 339 __cpu_to_le32(2), 340 }, 341 { 342 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), 343 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 344 __cpu_to_le32(3), 345 }, 346 { 347 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), 348 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 349 __cpu_to_le32(2), 350 }, 351 { 352 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), 353 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 354 __cpu_to_le32(3), 355 }, 356 { 357 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), 358 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 359 __cpu_to_le32(2), 360 }, 361 { 362 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), 363 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 364 __cpu_to_le32(3), 365 }, 366 { 367 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), 368 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 369 __cpu_to_le32(2), 370 }, 371 { 372 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), 373 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 374 __cpu_to_le32(3), 375 }, 376 { 377 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), 378 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 379 __cpu_to_le32(2), 380 }, 381 { 382 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), 383 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 384 __cpu_to_le32(0), 385 }, 386 { 387 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), 388 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 389 __cpu_to_le32(1), 390 }, 391 { /* not used */ 392 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), 393 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 394 __cpu_to_le32(0), 395 }, 396 { /* not used */ 397 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), 398 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 399 __cpu_to_le32(1), 400 }, 401 { 402 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), 403 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 404 __cpu_to_le32(4), 405 }, 406 { 407 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), 408 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 409 __cpu_to_le32(5), 410 }, 411 412 /* (Additions here) */ 413 414 { /* must be last */ 415 __cpu_to_le32(0), 416 __cpu_to_le32(0), 417 __cpu_to_le32(0), 418 }, 419 }; 420 421 static bool ath10k_pci_is_awake(struct ath10k *ar) 422 { 423 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 424 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 425 RTC_STATE_ADDRESS); 426 427 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; 428 } 429 430 static void __ath10k_pci_wake(struct ath10k *ar) 431 { 432 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 433 434 lockdep_assert_held(&ar_pci->ps_lock); 435 436 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n", 437 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 438 
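	/* Assert the SOC wake request below; the wakeup is not instantaneous.
	 * Callers poll RTC_STATE via ath10k_pci_wake_wait() /
	 * ath10k_pci_is_awake() until the chip reports RTC_STATE_V_ON.
	 */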
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
536 */ 537 if (!ar_pci->ps_awake) { 538 __ath10k_pci_wake(ar); 539 540 ret = ath10k_pci_wake_wait(ar); 541 if (ret == 0) 542 ar_pci->ps_awake = true; 543 } 544 545 if (ret == 0) { 546 ar_pci->ps_wake_refcount++; 547 WARN_ON(ar_pci->ps_wake_refcount == 0); 548 } 549 550 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 551 552 return ret; 553 } 554 555 static void ath10k_pci_sleep(struct ath10k *ar) 556 { 557 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 558 unsigned long flags; 559 560 if (ar_pci->pci_ps == 0) 561 return; 562 563 spin_lock_irqsave(&ar_pci->ps_lock, flags); 564 565 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", 566 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 567 568 if (WARN_ON(ar_pci->ps_wake_refcount == 0)) 569 goto skip; 570 571 ar_pci->ps_wake_refcount--; 572 573 mod_timer(&ar_pci->ps_timer, jiffies + 574 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); 575 576 skip: 577 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 578 } 579 580 static void ath10k_pci_ps_timer(unsigned long ptr) 581 { 582 struct ath10k *ar = (void *)ptr; 583 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 584 unsigned long flags; 585 586 spin_lock_irqsave(&ar_pci->ps_lock, flags); 587 588 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", 589 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 590 591 if (ar_pci->ps_wake_refcount > 0) 592 goto skip; 593 594 __ath10k_pci_sleep(ar); 595 596 skip: 597 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 598 } 599 600 static void ath10k_pci_sleep_sync(struct ath10k *ar) 601 { 602 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 603 unsigned long flags; 604 605 if (ar_pci->pci_ps == 0) { 606 ath10k_pci_force_sleep(ar); 607 return; 608 } 609 610 del_timer_sync(&ar_pci->ps_timer); 611 612 spin_lock_irqsave(&ar_pci->ps_lock, flags); 613 WARN_ON(ar_pci->ps_wake_refcount > 0); 614 __ath10k_pci_sleep(ar); 615 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 616 } 617 618 void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) 619 { 620 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 621 int ret; 622 623 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) { 624 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 625 offset, offset + sizeof(value), ar_pci->mem_len); 626 return; 627 } 628 629 ret = ath10k_pci_wake(ar); 630 if (ret) { 631 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", 632 value, offset, ret); 633 return; 634 } 635 636 iowrite32(value, ar_pci->mem + offset); 637 ath10k_pci_sleep(ar); 638 } 639 640 u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) 641 { 642 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 643 u32 val; 644 int ret; 645 646 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) { 647 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 648 offset, offset + sizeof(val), ar_pci->mem_len); 649 return 0; 650 } 651 652 ret = ath10k_pci_wake(ar); 653 if (ret) { 654 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", 655 offset, ret); 656 return 0xffffffff; 657 } 658 659 val = ioread32(ar_pci->mem + offset); 660 ath10k_pci_sleep(ar); 661 662 return val; 663 } 664 665 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) 666 { 667 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); 668 } 669 670 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) 671 { 672 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); 673 } 674 675 u32 
ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) 676 { 677 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); 678 } 679 680 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) 681 { 682 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); 683 } 684 685 static bool ath10k_pci_irq_pending(struct ath10k *ar) 686 { 687 u32 cause; 688 689 /* Check if the shared legacy irq is for us */ 690 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 691 PCIE_INTR_CAUSE_ADDRESS); 692 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) 693 return true; 694 695 return false; 696 } 697 698 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) 699 { 700 /* IMPORTANT: INTR_CLR register has to be set after 701 * INTR_ENABLE is set to 0, otherwise interrupt can not be 702 * really cleared. */ 703 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 704 0); 705 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, 706 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 707 708 /* IMPORTANT: this extra read transaction is required to 709 * flush the posted write buffer. */ 710 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 711 PCIE_INTR_ENABLE_ADDRESS); 712 } 713 714 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) 715 { 716 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 717 PCIE_INTR_ENABLE_ADDRESS, 718 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 719 720 /* IMPORTANT: this extra read transaction is required to 721 * flush the posted write buffer. */ 722 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 723 PCIE_INTR_ENABLE_ADDRESS); 724 } 725 726 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) 727 { 728 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 729 730 if (ar_pci->num_msi_intrs > 1) 731 return "msi-x"; 732 733 if (ar_pci->num_msi_intrs == 1) 734 return "msi"; 735 736 return "legacy"; 737 } 738 739 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) 740 { 741 struct ath10k *ar = pipe->hif_ce_state; 742 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 743 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 744 struct sk_buff *skb; 745 dma_addr_t paddr; 746 int ret; 747 748 skb = dev_alloc_skb(pipe->buf_sz); 749 if (!skb) 750 return -ENOMEM; 751 752 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); 753 754 paddr = dma_map_single(ar->dev, skb->data, 755 skb->len + skb_tailroom(skb), 756 DMA_FROM_DEVICE); 757 if (unlikely(dma_mapping_error(ar->dev, paddr))) { 758 ath10k_warn(ar, "failed to dma map pci rx buf\n"); 759 dev_kfree_skb_any(skb); 760 return -EIO; 761 } 762 763 ATH10K_SKB_RXCB(skb)->paddr = paddr; 764 765 spin_lock_bh(&ar_pci->ce_lock); 766 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); 767 spin_unlock_bh(&ar_pci->ce_lock); 768 if (ret) { 769 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), 770 DMA_FROM_DEVICE); 771 dev_kfree_skb_any(skb); 772 return ret; 773 } 774 775 return 0; 776 } 777 778 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) 779 { 780 struct ath10k *ar = pipe->hif_ce_state; 781 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 782 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 783 int ret, num; 784 785 if (pipe->buf_sz == 0) 786 return; 787 788 if (!ce_pipe->dest_ring) 789 return; 790 791 spin_lock_bh(&ar_pci->ce_lock); 792 num = __ath10k_ce_rx_num_free_bufs(ce_pipe); 793 spin_unlock_bh(&ar_pci->ce_lock); 794 while (num--) { 795 ret = __ath10k_pci_rx_post_buf(pipe); 796 if (ret) { 797 if 
(ret == -ENOSPC) 798 break; 799 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); 800 mod_timer(&ar_pci->rx_post_retry, jiffies + 801 ATH10K_PCI_RX_POST_RETRY_MS); 802 break; 803 } 804 } 805 } 806 807 static void ath10k_pci_rx_post(struct ath10k *ar) 808 { 809 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 810 int i; 811 812 for (i = 0; i < CE_COUNT; i++) 813 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); 814 } 815 816 static void ath10k_pci_rx_replenish_retry(unsigned long ptr) 817 { 818 struct ath10k *ar = (void *)ptr; 819 820 ath10k_pci_rx_post(ar); 821 } 822 823 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 824 { 825 u32 val = 0; 826 827 switch (ar->hw_rev) { 828 case ATH10K_HW_QCA988X: 829 case ATH10K_HW_QCA6174: 830 case ATH10K_HW_QCA9377: 831 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 832 CORE_CTRL_ADDRESS) & 833 0x7ff) << 21; 834 break; 835 case ATH10K_HW_QCA99X0: 836 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); 837 break; 838 } 839 840 val |= 0x100000 | (addr & 0xfffff); 841 return val; 842 } 843 844 /* 845 * Diagnostic read/write access is provided for startup/config/debug usage. 846 * Caller must guarantee proper alignment, when applicable, and single user 847 * at any moment. 848 */ 849 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, 850 int nbytes) 851 { 852 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 853 int ret = 0; 854 u32 buf; 855 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 856 unsigned int id; 857 unsigned int flags; 858 struct ath10k_ce_pipe *ce_diag; 859 /* Host buffer address in CE space */ 860 u32 ce_data; 861 dma_addr_t ce_data_base = 0; 862 void *data_buf = NULL; 863 int i; 864 865 spin_lock_bh(&ar_pci->ce_lock); 866 867 ce_diag = ar_pci->ce_diag; 868 869 /* 870 * Allocate a temporary bounce buffer to hold caller's data 871 * to be DMA'ed from Target. This guarantees 872 * 1) 4-byte alignment 873 * 2) Buffer in DMA-able space 874 */ 875 orig_nbytes = nbytes; 876 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 877 orig_nbytes, 878 &ce_data_base, 879 GFP_ATOMIC); 880 881 if (!data_buf) { 882 ret = -ENOMEM; 883 goto done; 884 } 885 memset(data_buf, 0, orig_nbytes); 886 887 remaining_bytes = orig_nbytes; 888 ce_data = ce_data_base; 889 while (remaining_bytes) { 890 nbytes = min_t(unsigned int, remaining_bytes, 891 DIAG_TRANSFER_LIMIT); 892 893 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); 894 if (ret != 0) 895 goto done; 896 897 /* Request CE to send from Target(!) address to Host buffer */ 898 /* 899 * The address supplied by the caller is in the 900 * Target CPU virtual address space. 
901 * 902 * In order to use this address with the diagnostic CE, 903 * convert it from Target CPU virtual address space 904 * to CE address space 905 */ 906 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 907 908 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, 909 0); 910 if (ret) 911 goto done; 912 913 i = 0; 914 while (ath10k_ce_completed_send_next_nolock(ce_diag, 915 NULL) != 0) { 916 mdelay(1); 917 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 918 ret = -EBUSY; 919 goto done; 920 } 921 } 922 923 i = 0; 924 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, 925 &completed_nbytes, 926 &id, &flags) != 0) { 927 mdelay(1); 928 929 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 930 ret = -EBUSY; 931 goto done; 932 } 933 } 934 935 if (nbytes != completed_nbytes) { 936 ret = -EIO; 937 goto done; 938 } 939 940 if (buf != ce_data) { 941 ret = -EIO; 942 goto done; 943 } 944 945 remaining_bytes -= nbytes; 946 address += nbytes; 947 ce_data += nbytes; 948 } 949 950 done: 951 if (ret == 0) 952 memcpy(data, data_buf, orig_nbytes); 953 else 954 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", 955 address, ret); 956 957 if (data_buf) 958 dma_free_coherent(ar->dev, orig_nbytes, data_buf, 959 ce_data_base); 960 961 spin_unlock_bh(&ar_pci->ce_lock); 962 963 return ret; 964 } 965 966 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) 967 { 968 __le32 val = 0; 969 int ret; 970 971 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); 972 *value = __le32_to_cpu(val); 973 974 return ret; 975 } 976 977 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, 978 u32 src, u32 len) 979 { 980 u32 host_addr, addr; 981 int ret; 982 983 host_addr = host_interest_item_address(src); 984 985 ret = ath10k_pci_diag_read32(ar, host_addr, &addr); 986 if (ret != 0) { 987 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", 988 src, ret); 989 return ret; 990 } 991 992 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); 993 if (ret != 0) { 994 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", 995 addr, len, ret); 996 return ret; 997 } 998 999 return 0; 1000 } 1001 1002 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ 1003 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) 1004 1005 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, 1006 const void *data, int nbytes) 1007 { 1008 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1009 int ret = 0; 1010 u32 buf; 1011 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 1012 unsigned int id; 1013 unsigned int flags; 1014 struct ath10k_ce_pipe *ce_diag; 1015 void *data_buf = NULL; 1016 u32 ce_data; /* Host buffer address in CE space */ 1017 dma_addr_t ce_data_base = 0; 1018 int i; 1019 1020 spin_lock_bh(&ar_pci->ce_lock); 1021 1022 ce_diag = ar_pci->ce_diag; 1023 1024 /* 1025 * Allocate a temporary bounce buffer to hold caller's data 1026 * to be DMA'ed to Target. This guarantees 1027 * 1) 4-byte alignment 1028 * 2) Buffer in DMA-able space 1029 */ 1030 orig_nbytes = nbytes; 1031 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 1032 orig_nbytes, 1033 &ce_data_base, 1034 GFP_ATOMIC); 1035 if (!data_buf) { 1036 ret = -ENOMEM; 1037 goto done; 1038 } 1039 1040 /* Copy caller's data to allocated DMA buf */ 1041 memcpy(data_buf, data, orig_nbytes); 1042 1043 /* 1044 * The address supplied by the caller is in the 1045 * Target CPU virtual address space. 
1046 * 1047 * In order to use this address with the diagnostic CE, 1048 * convert it from 1049 * Target CPU virtual address space 1050 * to 1051 * CE address space 1052 */ 1053 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 1054 1055 remaining_bytes = orig_nbytes; 1056 ce_data = ce_data_base; 1057 while (remaining_bytes) { 1058 /* FIXME: check cast */ 1059 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); 1060 1061 /* Set up to receive directly into Target(!) address */ 1062 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address); 1063 if (ret != 0) 1064 goto done; 1065 1066 /* 1067 * Request CE to send caller-supplied data that 1068 * was copied to bounce buffer to Target(!) address. 1069 */ 1070 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, 1071 nbytes, 0, 0); 1072 if (ret != 0) 1073 goto done; 1074 1075 i = 0; 1076 while (ath10k_ce_completed_send_next_nolock(ce_diag, 1077 NULL) != 0) { 1078 mdelay(1); 1079 1080 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 1081 ret = -EBUSY; 1082 goto done; 1083 } 1084 } 1085 1086 i = 0; 1087 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, 1088 &completed_nbytes, 1089 &id, &flags) != 0) { 1090 mdelay(1); 1091 1092 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 1093 ret = -EBUSY; 1094 goto done; 1095 } 1096 } 1097 1098 if (nbytes != completed_nbytes) { 1099 ret = -EIO; 1100 goto done; 1101 } 1102 1103 if (buf != address) { 1104 ret = -EIO; 1105 goto done; 1106 } 1107 1108 remaining_bytes -= nbytes; 1109 address += nbytes; 1110 ce_data += nbytes; 1111 } 1112 1113 done: 1114 if (data_buf) { 1115 dma_free_coherent(ar->dev, orig_nbytes, data_buf, 1116 ce_data_base); 1117 } 1118 1119 if (ret != 0) 1120 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", 1121 address, ret); 1122 1123 spin_unlock_bh(&ar_pci->ce_lock); 1124 1125 return ret; 1126 } 1127 1128 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) 1129 { 1130 __le32 val = __cpu_to_le32(value); 1131 1132 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); 1133 } 1134 1135 /* Called by lower (CE) layer when a send to Target completes. 
*/ 1136 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state) 1137 { 1138 struct ath10k *ar = ce_state->ar; 1139 struct sk_buff_head list; 1140 struct sk_buff *skb; 1141 1142 __skb_queue_head_init(&list); 1143 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { 1144 /* no need to call tx completion for NULL pointers */ 1145 if (skb == NULL) 1146 continue; 1147 1148 __skb_queue_tail(&list, skb); 1149 } 1150 1151 while ((skb = __skb_dequeue(&list))) 1152 ath10k_htc_tx_completion_handler(ar, skb); 1153 } 1154 1155 static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, 1156 void (*callback)(struct ath10k *ar, 1157 struct sk_buff *skb)) 1158 { 1159 struct ath10k *ar = ce_state->ar; 1160 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1161 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 1162 struct sk_buff *skb; 1163 struct sk_buff_head list; 1164 void *transfer_context; 1165 u32 ce_data; 1166 unsigned int nbytes, max_nbytes; 1167 unsigned int transfer_id; 1168 unsigned int flags; 1169 1170 __skb_queue_head_init(&list); 1171 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, 1172 &ce_data, &nbytes, &transfer_id, 1173 &flags) == 0) { 1174 skb = transfer_context; 1175 max_nbytes = skb->len + skb_tailroom(skb); 1176 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1177 max_nbytes, DMA_FROM_DEVICE); 1178 1179 if (unlikely(max_nbytes < nbytes)) { 1180 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", 1181 nbytes, max_nbytes); 1182 dev_kfree_skb_any(skb); 1183 continue; 1184 } 1185 1186 skb_put(skb, nbytes); 1187 __skb_queue_tail(&list, skb); 1188 } 1189 1190 while ((skb = __skb_dequeue(&list))) { 1191 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", 1192 ce_state->id, skb->len); 1193 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", 1194 skb->data, skb->len); 1195 1196 callback(ar, skb); 1197 } 1198 1199 ath10k_pci_rx_post_pipe(pipe_info); 1200 } 1201 1202 /* Called by lower (CE) layer when data is received from the Target. */ 1203 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1204 { 1205 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1206 } 1207 1208 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1209 { 1210 /* CE4 polling needs to be done whenever CE pipe which transports 1211 * HTT Rx (target->host) is processed. 1212 */ 1213 ath10k_ce_per_engine_service(ce_state->ar, 4); 1214 1215 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1216 } 1217 1218 /* Called by lower (CE) layer when a send to HTT Target completes. */ 1219 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) 1220 { 1221 struct ath10k *ar = ce_state->ar; 1222 struct sk_buff *skb; 1223 1224 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { 1225 /* no need to call tx completion for NULL pointers */ 1226 if (!skb) 1227 continue; 1228 1229 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 1230 skb->len, DMA_TO_DEVICE); 1231 ath10k_htt_hif_tx_complete(ar, skb); 1232 } 1233 } 1234 1235 static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) 1236 { 1237 skb_pull(skb, sizeof(struct ath10k_htc_hdr)); 1238 ath10k_htt_t2h_msg_handler(ar, skb); 1239 } 1240 1241 /* Called by lower (CE) layer when HTT data is received from the Target. 
*/ 1242 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) 1243 { 1244 /* CE4 polling needs to be done whenever CE pipe which transports 1245 * HTT Rx (target->host) is processed. 1246 */ 1247 ath10k_ce_per_engine_service(ce_state->ar, 4); 1248 1249 ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); 1250 } 1251 1252 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1253 struct ath10k_hif_sg_item *items, int n_items) 1254 { 1255 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1256 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; 1257 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; 1258 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; 1259 unsigned int nentries_mask; 1260 unsigned int sw_index; 1261 unsigned int write_index; 1262 int err, i = 0; 1263 1264 spin_lock_bh(&ar_pci->ce_lock); 1265 1266 nentries_mask = src_ring->nentries_mask; 1267 sw_index = src_ring->sw_index; 1268 write_index = src_ring->write_index; 1269 1270 if (unlikely(CE_RING_DELTA(nentries_mask, 1271 write_index, sw_index - 1) < n_items)) { 1272 err = -ENOBUFS; 1273 goto err; 1274 } 1275 1276 for (i = 0; i < n_items - 1; i++) { 1277 ath10k_dbg(ar, ATH10K_DBG_PCI, 1278 "pci tx item %d paddr 0x%08x len %d n_items %d\n", 1279 i, items[i].paddr, items[i].len, n_items); 1280 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1281 items[i].vaddr, items[i].len); 1282 1283 err = ath10k_ce_send_nolock(ce_pipe, 1284 items[i].transfer_context, 1285 items[i].paddr, 1286 items[i].len, 1287 items[i].transfer_id, 1288 CE_SEND_FLAG_GATHER); 1289 if (err) 1290 goto err; 1291 } 1292 1293 /* `i` is equal to `n_items -1` after for() */ 1294 1295 ath10k_dbg(ar, ATH10K_DBG_PCI, 1296 "pci tx item %d paddr 0x%08x len %d n_items %d\n", 1297 i, items[i].paddr, items[i].len, n_items); 1298 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1299 items[i].vaddr, items[i].len); 1300 1301 err = ath10k_ce_send_nolock(ce_pipe, 1302 items[i].transfer_context, 1303 items[i].paddr, 1304 items[i].len, 1305 items[i].transfer_id, 1306 0); 1307 if (err) 1308 goto err; 1309 1310 spin_unlock_bh(&ar_pci->ce_lock); 1311 return 0; 1312 1313 err: 1314 for (; i > 0; i--) 1315 __ath10k_ce_send_revert(ce_pipe); 1316 1317 spin_unlock_bh(&ar_pci->ce_lock); 1318 return err; 1319 } 1320 1321 static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1322 size_t buf_len) 1323 { 1324 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); 1325 } 1326 1327 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 1328 { 1329 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1330 1331 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); 1332 1333 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); 1334 } 1335 1336 static void ath10k_pci_dump_registers(struct ath10k *ar, 1337 struct ath10k_fw_crash_data *crash_data) 1338 { 1339 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; 1340 int i, ret; 1341 1342 lockdep_assert_held(&ar->data_lock); 1343 1344 ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], 1345 hi_failure_state, 1346 REG_DUMP_COUNT_QCA988X * sizeof(__le32)); 1347 if (ret) { 1348 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); 1349 return; 1350 } 1351 1352 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); 1353 1354 ath10k_err(ar, "firmware register dump:\n"); 1355 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) 1356 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", 1357 i, 1358 
__le32_to_cpu(reg_dump_values[i]), 1359 __le32_to_cpu(reg_dump_values[i + 1]), 1360 __le32_to_cpu(reg_dump_values[i + 2]), 1361 __le32_to_cpu(reg_dump_values[i + 3])); 1362 1363 if (!crash_data) 1364 return; 1365 1366 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) 1367 crash_data->registers[i] = reg_dump_values[i]; 1368 } 1369 1370 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) 1371 { 1372 struct ath10k_fw_crash_data *crash_data; 1373 char uuid[50]; 1374 1375 spin_lock_bh(&ar->data_lock); 1376 1377 ar->stats.fw_crash_counter++; 1378 1379 crash_data = ath10k_debug_get_new_fw_crash_data(ar); 1380 1381 if (crash_data) 1382 scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid); 1383 else 1384 scnprintf(uuid, sizeof(uuid), "n/a"); 1385 1386 ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); 1387 ath10k_print_driver_info(ar); 1388 ath10k_pci_dump_registers(ar, crash_data); 1389 1390 spin_unlock_bh(&ar->data_lock); 1391 1392 queue_work(ar->workqueue, &ar->restart_work); 1393 } 1394 1395 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 1396 int force) 1397 { 1398 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); 1399 1400 if (!force) { 1401 int resources; 1402 /* 1403 * Decide whether to actually poll for completions, or just 1404 * wait for a later chance. 1405 * If there seem to be plenty of resources left, then just wait 1406 * since checking involves reading a CE register, which is a 1407 * relatively expensive operation. 1408 */ 1409 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); 1410 1411 /* 1412 * If at least 50% of the total resources are still available, 1413 * don't bother checking again yet. 1414 */ 1415 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) 1416 return; 1417 } 1418 ath10k_ce_per_engine_service(ar, pipe); 1419 } 1420 1421 static void ath10k_pci_kill_tasklet(struct ath10k *ar) 1422 { 1423 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1424 int i; 1425 1426 tasklet_kill(&ar_pci->intr_tq); 1427 tasklet_kill(&ar_pci->msi_fw_err); 1428 1429 for (i = 0; i < CE_COUNT; i++) 1430 tasklet_kill(&ar_pci->pipe_info[i].intr); 1431 1432 del_timer_sync(&ar_pci->rx_post_retry); 1433 } 1434 1435 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, 1436 u8 *ul_pipe, u8 *dl_pipe) 1437 { 1438 const struct service_to_pipe *entry; 1439 bool ul_set = false, dl_set = false; 1440 int i; 1441 1442 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); 1443 1444 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { 1445 entry = &target_service_to_ce_map_wlan[i]; 1446 1447 if (__le32_to_cpu(entry->service_id) != service_id) 1448 continue; 1449 1450 switch (__le32_to_cpu(entry->pipedir)) { 1451 case PIPEDIR_NONE: 1452 break; 1453 case PIPEDIR_IN: 1454 WARN_ON(dl_set); 1455 *dl_pipe = __le32_to_cpu(entry->pipenum); 1456 dl_set = true; 1457 break; 1458 case PIPEDIR_OUT: 1459 WARN_ON(ul_set); 1460 *ul_pipe = __le32_to_cpu(entry->pipenum); 1461 ul_set = true; 1462 break; 1463 case PIPEDIR_INOUT: 1464 WARN_ON(dl_set); 1465 WARN_ON(ul_set); 1466 *dl_pipe = __le32_to_cpu(entry->pipenum); 1467 *ul_pipe = __le32_to_cpu(entry->pipenum); 1468 dl_set = true; 1469 ul_set = true; 1470 break; 1471 } 1472 } 1473 1474 if (WARN_ON(!ul_set || !dl_set)) 1475 return -ENOENT; 1476 1477 return 0; 1478 } 1479 1480 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, 1481 u8 *ul_pipe, u8 *dl_pipe) 1482 { 1483 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); 1484 1485 
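	/* The "default" pipes are the ones that carry HTC control traffic,
	 * i.e. whatever ATH10K_HTC_SVC_ID_RSVD_CTRL maps to in
	 * target_service_to_ce_map_wlan (CE0 for host->target, CE1 for
	 * target->host).
	 */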
(void)ath10k_pci_hif_map_service_to_pipe(ar, 1486 ATH10K_HTC_SVC_ID_RSVD_CTRL, 1487 ul_pipe, dl_pipe); 1488 } 1489 1490 static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) 1491 { 1492 u32 val; 1493 1494 switch (ar->hw_rev) { 1495 case ATH10K_HW_QCA988X: 1496 case ATH10K_HW_QCA6174: 1497 case ATH10K_HW_QCA9377: 1498 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1499 CORE_CTRL_ADDRESS); 1500 val &= ~CORE_CTRL_PCIE_REG_31_MASK; 1501 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1502 CORE_CTRL_ADDRESS, val); 1503 break; 1504 case ATH10K_HW_QCA99X0: 1505 /* TODO: Find appropriate register configuration for QCA99X0 1506 * to mask irq/MSI. 1507 */ 1508 break; 1509 } 1510 } 1511 1512 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) 1513 { 1514 u32 val; 1515 1516 switch (ar->hw_rev) { 1517 case ATH10K_HW_QCA988X: 1518 case ATH10K_HW_QCA6174: 1519 case ATH10K_HW_QCA9377: 1520 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1521 CORE_CTRL_ADDRESS); 1522 val |= CORE_CTRL_PCIE_REG_31_MASK; 1523 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1524 CORE_CTRL_ADDRESS, val); 1525 break; 1526 case ATH10K_HW_QCA99X0: 1527 /* TODO: Find appropriate register configuration for QCA99X0 1528 * to unmask irq/MSI. 1529 */ 1530 break; 1531 } 1532 } 1533 1534 static void ath10k_pci_irq_disable(struct ath10k *ar) 1535 { 1536 ath10k_ce_disable_interrupts(ar); 1537 ath10k_pci_disable_and_clear_legacy_irq(ar); 1538 ath10k_pci_irq_msi_fw_mask(ar); 1539 } 1540 1541 static void ath10k_pci_irq_sync(struct ath10k *ar) 1542 { 1543 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1544 int i; 1545 1546 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) 1547 synchronize_irq(ar_pci->pdev->irq + i); 1548 } 1549 1550 static void ath10k_pci_irq_enable(struct ath10k *ar) 1551 { 1552 ath10k_ce_enable_interrupts(ar); 1553 ath10k_pci_enable_legacy_irq(ar); 1554 ath10k_pci_irq_msi_fw_unmask(ar); 1555 } 1556 1557 static int ath10k_pci_hif_start(struct ath10k *ar) 1558 { 1559 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1560 1561 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); 1562 1563 ath10k_pci_irq_enable(ar); 1564 ath10k_pci_rx_post(ar); 1565 1566 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, 1567 ar_pci->link_ctl); 1568 1569 return 0; 1570 } 1571 1572 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 1573 { 1574 struct ath10k *ar; 1575 struct ath10k_ce_pipe *ce_pipe; 1576 struct ath10k_ce_ring *ce_ring; 1577 struct sk_buff *skb; 1578 int i; 1579 1580 ar = pci_pipe->hif_ce_state; 1581 ce_pipe = pci_pipe->ce_hdl; 1582 ce_ring = ce_pipe->dest_ring; 1583 1584 if (!ce_ring) 1585 return; 1586 1587 if (!pci_pipe->buf_sz) 1588 return; 1589 1590 for (i = 0; i < ce_ring->nentries; i++) { 1591 skb = ce_ring->per_transfer_context[i]; 1592 if (!skb) 1593 continue; 1594 1595 ce_ring->per_transfer_context[i] = NULL; 1596 1597 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1598 skb->len + skb_tailroom(skb), 1599 DMA_FROM_DEVICE); 1600 dev_kfree_skb_any(skb); 1601 } 1602 } 1603 1604 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 1605 { 1606 struct ath10k *ar; 1607 struct ath10k_pci *ar_pci; 1608 struct ath10k_ce_pipe *ce_pipe; 1609 struct ath10k_ce_ring *ce_ring; 1610 struct sk_buff *skb; 1611 int i; 1612 1613 ar = pci_pipe->hif_ce_state; 1614 ar_pci = ath10k_pci_priv(ar); 1615 ce_pipe = pci_pipe->ce_hdl; 1616 ce_ring = ce_pipe->src_ring; 1617 1618 if (!ce_ring) 1619 return; 1620 1621 if (!pci_pipe->buf_sz) 1622 return; 1623 1624 for (i = 0; i < 
ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However,
	 * regardless of how many MSI interrupts are assigned, the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt, reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

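	/* A BMI response has arrived: record its length and flag completion
	 * so the polling loop in ath10k_pci_bmi_wait() can return.
	 */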
xfer->resp_len = nbytes; 1826 xfer->rx_done = true; 1827 } 1828 1829 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 1830 struct ath10k_ce_pipe *rx_pipe, 1831 struct bmi_xfer *xfer) 1832 { 1833 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; 1834 1835 while (time_before_eq(jiffies, timeout)) { 1836 ath10k_pci_bmi_send_done(tx_pipe); 1837 ath10k_pci_bmi_recv_data(rx_pipe); 1838 1839 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) 1840 return 0; 1841 1842 schedule(); 1843 } 1844 1845 return -ETIMEDOUT; 1846 } 1847 1848 /* 1849 * Send an interrupt to the device to wake up the Target CPU 1850 * so it has an opportunity to notice any changed state. 1851 */ 1852 static int ath10k_pci_wake_target_cpu(struct ath10k *ar) 1853 { 1854 u32 addr, val; 1855 1856 addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; 1857 val = ath10k_pci_read32(ar, addr); 1858 val |= CORE_CTRL_CPU_INTR_MASK; 1859 ath10k_pci_write32(ar, addr, val); 1860 1861 return 0; 1862 } 1863 1864 static int ath10k_pci_get_num_banks(struct ath10k *ar) 1865 { 1866 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1867 1868 switch (ar_pci->pdev->device) { 1869 case QCA988X_2_0_DEVICE_ID: 1870 case QCA99X0_2_0_DEVICE_ID: 1871 return 1; 1872 case QCA6164_2_1_DEVICE_ID: 1873 case QCA6174_2_1_DEVICE_ID: 1874 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { 1875 case QCA6174_HW_1_0_CHIP_ID_REV: 1876 case QCA6174_HW_1_1_CHIP_ID_REV: 1877 case QCA6174_HW_2_1_CHIP_ID_REV: 1878 case QCA6174_HW_2_2_CHIP_ID_REV: 1879 return 3; 1880 case QCA6174_HW_1_3_CHIP_ID_REV: 1881 return 2; 1882 case QCA6174_HW_3_0_CHIP_ID_REV: 1883 case QCA6174_HW_3_1_CHIP_ID_REV: 1884 case QCA6174_HW_3_2_CHIP_ID_REV: 1885 return 9; 1886 } 1887 break; 1888 case QCA9377_1_0_DEVICE_ID: 1889 return 2; 1890 } 1891 1892 ath10k_warn(ar, "unknown number of banks, assuming 1\n"); 1893 return 1; 1894 } 1895 1896 static int ath10k_pci_init_config(struct ath10k *ar) 1897 { 1898 u32 interconnect_targ_addr; 1899 u32 pcie_state_targ_addr = 0; 1900 u32 pipe_cfg_targ_addr = 0; 1901 u32 svc_to_pipe_map = 0; 1902 u32 pcie_config_flags = 0; 1903 u32 ealloc_value; 1904 u32 ealloc_targ_addr; 1905 u32 flag2_value; 1906 u32 flag2_targ_addr; 1907 int ret = 0; 1908 1909 /* Download to Target the CE Config and the service-to-CE map */ 1910 interconnect_targ_addr = 1911 host_interest_item_address(HI_ITEM(hi_interconnect_state)); 1912 1913 /* Supply Target-side CE configuration */ 1914 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, 1915 &pcie_state_targ_addr); 1916 if (ret != 0) { 1917 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); 1918 return ret; 1919 } 1920 1921 if (pcie_state_targ_addr == 0) { 1922 ret = -EIO; 1923 ath10k_err(ar, "Invalid pcie state addr\n"); 1924 return ret; 1925 } 1926 1927 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 1928 offsetof(struct pcie_state, 1929 pipe_cfg_addr)), 1930 &pipe_cfg_targ_addr); 1931 if (ret != 0) { 1932 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); 1933 return ret; 1934 } 1935 1936 if (pipe_cfg_targ_addr == 0) { 1937 ret = -EIO; 1938 ath10k_err(ar, "Invalid pipe cfg addr\n"); 1939 return ret; 1940 } 1941 1942 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, 1943 target_ce_config_wlan, 1944 sizeof(struct ce_pipe_config) * 1945 NUM_TARGET_CE_CONFIG_WLAN); 1946 1947 if (ret != 0) { 1948 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); 1949 return ret; 1950 } 1951 1952 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 1953 offsetof(struct pcie_state, 1954 
svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for another feature.
2044 */ 2045 2046 /* Override Host's Copy Engine 5 configuration */ 2047 attr = &host_ce_config_wlan[5]; 2048 attr->src_sz_max = 0; 2049 attr->dest_nentries = 0; 2050 2051 /* Override Target firmware's Copy Engine configuration */ 2052 config = &target_ce_config_wlan[5]; 2053 config->pipedir = __cpu_to_le32(PIPEDIR_OUT); 2054 config->nbytes_max = __cpu_to_le32(2048); 2055 2056 /* Map from service/endpoint to Copy Engine */ 2057 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); 2058 } 2059 2060 static int ath10k_pci_alloc_pipes(struct ath10k *ar) 2061 { 2062 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2063 struct ath10k_pci_pipe *pipe; 2064 int i, ret; 2065 2066 for (i = 0; i < CE_COUNT; i++) { 2067 pipe = &ar_pci->pipe_info[i]; 2068 pipe->ce_hdl = &ar_pci->ce_states[i]; 2069 pipe->pipe_num = i; 2070 pipe->hif_ce_state = ar; 2071 2072 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); 2073 if (ret) { 2074 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", 2075 i, ret); 2076 return ret; 2077 } 2078 2079 /* Last CE is Diagnostic Window */ 2080 if (i == CE_DIAG_PIPE) { 2081 ar_pci->ce_diag = pipe->ce_hdl; 2082 continue; 2083 } 2084 2085 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); 2086 } 2087 2088 return 0; 2089 } 2090 2091 static void ath10k_pci_free_pipes(struct ath10k *ar) 2092 { 2093 int i; 2094 2095 for (i = 0; i < CE_COUNT; i++) 2096 ath10k_ce_free_pipe(ar, i); 2097 } 2098 2099 static int ath10k_pci_init_pipes(struct ath10k *ar) 2100 { 2101 int i, ret; 2102 2103 for (i = 0; i < CE_COUNT; i++) { 2104 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); 2105 if (ret) { 2106 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", 2107 i, ret); 2108 return ret; 2109 } 2110 } 2111 2112 return 0; 2113 } 2114 2115 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) 2116 { 2117 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & 2118 FW_IND_EVENT_PENDING; 2119 } 2120 2121 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) 2122 { 2123 u32 val; 2124 2125 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2126 val &= ~FW_IND_EVENT_PENDING; 2127 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); 2128 } 2129 2130 /* this function effectively clears target memory controller assert line */ 2131 static void ath10k_pci_warm_reset_si0(struct ath10k *ar) 2132 { 2133 u32 val; 2134 2135 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2136 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2137 val | SOC_RESET_CONTROL_SI0_RST_MASK); 2138 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2139 2140 msleep(10); 2141 2142 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2143 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2144 val & ~SOC_RESET_CONTROL_SI0_RST_MASK); 2145 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2146 2147 msleep(10); 2148 } 2149 2150 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) 2151 { 2152 u32 val; 2153 2154 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); 2155 2156 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2157 SOC_RESET_CONTROL_ADDRESS); 2158 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2159 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); 2160 } 2161 2162 static void ath10k_pci_warm_reset_ce(struct ath10k *ar) 2163 { 2164 u32 val; 2165 2166 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2167 SOC_RESET_CONTROL_ADDRESS); 2168 2169 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2170 
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}

static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous; e.g. if
	 * it were to access the copy engine while the host performs a copy
	 * engine reset, the device could confuse the PCIe controller to the
	 * point of bringing the host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
		return ath10k_pci_warm_reset(ar);
	} else if (QCA_REV_99X0(ar)) {
		ath10k_pci_irq_disable(ar);
		return ath10k_pci_qca99x0_chip_reset(ar);
	} else {
		return -ENOTSUPP;
	}
}

static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold
	 * reset. It is thus preferred to use warm reset, which is safer but
	 * may not be able to recover the device from all possible failure
	 * scenarios.
	 *
	 * Warm reset doesn't always work on the first try, so attempt it a
	 * few times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes the copy engine doesn't recover after a
		 * warm reset. In most cases this needs a cold reset. In some
		 * of these cases the device is in such a state that a cold
		 * reset may lock up the host.
		 *
		 * Reading any host interest register via the copy engine is
		 * sufficient to verify that the device is capable of booting
		 * the firmware blob.
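		 * The code below does exactly that: it re-initialises the
		 * copy engine pipes and then attempts a diagnostic read of
		 * QCA988X_HOST_INTEREST_ADDRESS. Only if that read succeeds
		 * is the warm reset treated as successful; otherwise another
		 * attempt is made before falling back to a cold reset.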
2268 */ 2269 ret = ath10k_pci_init_pipes(ar); 2270 if (ret) { 2271 ath10k_warn(ar, "failed to init copy engine: %d\n", 2272 ret); 2273 continue; 2274 } 2275 2276 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, 2277 &val); 2278 if (ret) { 2279 ath10k_warn(ar, "failed to poke copy engine: %d\n", 2280 ret); 2281 continue; 2282 } 2283 2284 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n"); 2285 return 0; 2286 } 2287 2288 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) { 2289 ath10k_warn(ar, "refusing cold reset as requested\n"); 2290 return -EPERM; 2291 } 2292 2293 ret = ath10k_pci_cold_reset(ar); 2294 if (ret) { 2295 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2296 return ret; 2297 } 2298 2299 ret = ath10k_pci_wait_for_target_init(ar); 2300 if (ret) { 2301 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2302 ret); 2303 return ret; 2304 } 2305 2306 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n"); 2307 2308 return 0; 2309 } 2310 2311 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) 2312 { 2313 int ret; 2314 2315 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n"); 2316 2317 /* FIXME: QCA6174 requires cold + warm reset to work. */ 2318 2319 ret = ath10k_pci_cold_reset(ar); 2320 if (ret) { 2321 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2322 return ret; 2323 } 2324 2325 ret = ath10k_pci_wait_for_target_init(ar); 2326 if (ret) { 2327 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2328 ret); 2329 return ret; 2330 } 2331 2332 ret = ath10k_pci_warm_reset(ar); 2333 if (ret) { 2334 ath10k_warn(ar, "failed to warm reset: %d\n", ret); 2335 return ret; 2336 } 2337 2338 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); 2339 2340 return 0; 2341 } 2342 2343 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) 2344 { 2345 int ret; 2346 2347 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n"); 2348 2349 ret = ath10k_pci_cold_reset(ar); 2350 if (ret) { 2351 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2352 return ret; 2353 } 2354 2355 ret = ath10k_pci_wait_for_target_init(ar); 2356 if (ret) { 2357 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2358 ret); 2359 return ret; 2360 } 2361 2362 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n"); 2363 2364 return 0; 2365 } 2366 2367 static int ath10k_pci_chip_reset(struct ath10k *ar) 2368 { 2369 if (QCA_REV_988X(ar)) 2370 return ath10k_pci_qca988x_chip_reset(ar); 2371 else if (QCA_REV_6174(ar)) 2372 return ath10k_pci_qca6174_chip_reset(ar); 2373 else if (QCA_REV_9377(ar)) 2374 return ath10k_pci_qca6174_chip_reset(ar); 2375 else if (QCA_REV_99X0(ar)) 2376 return ath10k_pci_qca99x0_chip_reset(ar); 2377 else 2378 return -ENOTSUPP; 2379 } 2380 2381 static int ath10k_pci_hif_power_up(struct ath10k *ar) 2382 { 2383 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2384 int ret; 2385 2386 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); 2387 2388 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2389 &ar_pci->link_ctl); 2390 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2391 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); 2392 2393 /* 2394 * Bring the target up cleanly. 2395 * 2396 * The target may be in an undefined state with an AUX-powered Target 2397 * and a Host in WoW mode. 
If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well, so there's no point in resetting here.
	 */
}

#ifdef CONFIG_PM

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake may
	 * still be true. It is known that the device may be asleep after
	 * resuming regardless of the SoC powersave state before suspending.
	 * Hence make sure the device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	if (ar_pci->pci_ps == 0) {
		ret = ath10k_pci_force_wake(ar);
		if (ret) {
			ath10k_err(ar, "failed to wake up target: %d\n", ret);
			return ret;
		}
	}

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
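	 * The dword read at config offset 0x40 below therefore covers the
	 * RETRY_TIMEOUT byte at offset 0x41; masking with 0xffff00ff clears
	 * bits 15:8 (i.e. that byte) before the value is written back.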
2484 */ 2485 pci_read_config_dword(pdev, 0x40, &val); 2486 if ((val & 0x0000ff00) != 0) 2487 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 2488 2489 return ret; 2490 } 2491 #endif 2492 2493 static const struct ath10k_hif_ops ath10k_pci_hif_ops = { 2494 .tx_sg = ath10k_pci_hif_tx_sg, 2495 .diag_read = ath10k_pci_hif_diag_read, 2496 .diag_write = ath10k_pci_diag_write_mem, 2497 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, 2498 .start = ath10k_pci_hif_start, 2499 .stop = ath10k_pci_hif_stop, 2500 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, 2501 .get_default_pipe = ath10k_pci_hif_get_default_pipe, 2502 .send_complete_check = ath10k_pci_hif_send_complete_check, 2503 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, 2504 .power_up = ath10k_pci_hif_power_up, 2505 .power_down = ath10k_pci_hif_power_down, 2506 .read32 = ath10k_pci_read32, 2507 .write32 = ath10k_pci_write32, 2508 #ifdef CONFIG_PM 2509 .suspend = ath10k_pci_hif_suspend, 2510 .resume = ath10k_pci_hif_resume, 2511 #endif 2512 }; 2513 2514 static void ath10k_pci_ce_tasklet(unsigned long ptr) 2515 { 2516 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; 2517 struct ath10k_pci *ar_pci = pipe->ar_pci; 2518 2519 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); 2520 } 2521 2522 static void ath10k_msi_err_tasklet(unsigned long data) 2523 { 2524 struct ath10k *ar = (struct ath10k *)data; 2525 2526 if (!ath10k_pci_has_fw_crashed(ar)) { 2527 ath10k_warn(ar, "received unsolicited fw crash interrupt\n"); 2528 return; 2529 } 2530 2531 ath10k_pci_irq_disable(ar); 2532 ath10k_pci_fw_crashed_clear(ar); 2533 ath10k_pci_fw_crashed_dump(ar); 2534 } 2535 2536 /* 2537 * Handler for a per-engine interrupt on a PARTICULAR CE. 2538 * This is used in cases where each CE has a private MSI interrupt. 2539 */ 2540 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) 2541 { 2542 struct ath10k *ar = arg; 2543 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2544 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; 2545 2546 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { 2547 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, 2548 ce_id); 2549 return IRQ_HANDLED; 2550 } 2551 2552 /* 2553 * NOTE: We are able to derive ce_id from irq because we 2554 * use a one-to-one mapping for CE's 0..5. 2555 * CE's 6 & 7 do not use interrupts at all. 2556 * 2557 * This mapping must be kept in sync with the mapping 2558 * used by firmware. 2559 */ 2560 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); 2561 return IRQ_HANDLED; 2562 } 2563 2564 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) 2565 { 2566 struct ath10k *ar = arg; 2567 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2568 2569 tasklet_schedule(&ar_pci->msi_fw_err); 2570 return IRQ_HANDLED; 2571 } 2572 2573 /* 2574 * Top-level interrupt handler for all PCI interrupts from a Target. 2575 * When a block of MSI interrupts is allocated, this top-level handler 2576 * is not used; instead, we directly call the correct sub-handler. 
2577 */ 2578 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) 2579 { 2580 struct ath10k *ar = arg; 2581 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2582 int ret; 2583 2584 if (ar_pci->pci_ps == 0) { 2585 ret = ath10k_pci_force_wake(ar); 2586 if (ret) { 2587 ath10k_warn(ar, "failed to wake device up on irq: %d\n", 2588 ret); 2589 return IRQ_NONE; 2590 } 2591 } 2592 2593 if (ar_pci->num_msi_intrs == 0) { 2594 if (!ath10k_pci_irq_pending(ar)) 2595 return IRQ_NONE; 2596 2597 ath10k_pci_disable_and_clear_legacy_irq(ar); 2598 } 2599 2600 tasklet_schedule(&ar_pci->intr_tq); 2601 2602 return IRQ_HANDLED; 2603 } 2604 2605 static void ath10k_pci_tasklet(unsigned long data) 2606 { 2607 struct ath10k *ar = (struct ath10k *)data; 2608 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2609 2610 if (ath10k_pci_has_fw_crashed(ar)) { 2611 ath10k_pci_irq_disable(ar); 2612 ath10k_pci_fw_crashed_clear(ar); 2613 ath10k_pci_fw_crashed_dump(ar); 2614 return; 2615 } 2616 2617 ath10k_ce_per_engine_service_any(ar); 2618 2619 /* Re-enable legacy irq that was disabled in the irq handler */ 2620 if (ar_pci->num_msi_intrs == 0) 2621 ath10k_pci_enable_legacy_irq(ar); 2622 } 2623 2624 static int ath10k_pci_request_irq_msix(struct ath10k *ar) 2625 { 2626 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2627 int ret, i; 2628 2629 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, 2630 ath10k_pci_msi_fw_handler, 2631 IRQF_SHARED, "ath10k_pci", ar); 2632 if (ret) { 2633 ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n", 2634 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); 2635 return ret; 2636 } 2637 2638 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { 2639 ret = request_irq(ar_pci->pdev->irq + i, 2640 ath10k_pci_per_engine_handler, 2641 IRQF_SHARED, "ath10k_pci", ar); 2642 if (ret) { 2643 ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n", 2644 ar_pci->pdev->irq + i, ret); 2645 2646 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) 2647 free_irq(ar_pci->pdev->irq + i, ar); 2648 2649 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); 2650 return ret; 2651 } 2652 } 2653 2654 return 0; 2655 } 2656 2657 static int ath10k_pci_request_irq_msi(struct ath10k *ar) 2658 { 2659 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2660 int ret; 2661 2662 ret = request_irq(ar_pci->pdev->irq, 2663 ath10k_pci_interrupt_handler, 2664 IRQF_SHARED, "ath10k_pci", ar); 2665 if (ret) { 2666 ath10k_warn(ar, "failed to request MSI irq %d: %d\n", 2667 ar_pci->pdev->irq, ret); 2668 return ret; 2669 } 2670 2671 return 0; 2672 } 2673 2674 static int ath10k_pci_request_irq_legacy(struct ath10k *ar) 2675 { 2676 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2677 int ret; 2678 2679 ret = request_irq(ar_pci->pdev->irq, 2680 ath10k_pci_interrupt_handler, 2681 IRQF_SHARED, "ath10k_pci", ar); 2682 if (ret) { 2683 ath10k_warn(ar, "failed to request legacy irq %d: %d\n", 2684 ar_pci->pdev->irq, ret); 2685 return ret; 2686 } 2687 2688 return 0; 2689 } 2690 2691 static int ath10k_pci_request_irq(struct ath10k *ar) 2692 { 2693 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2694 2695 switch (ar_pci->num_msi_intrs) { 2696 case 0: 2697 return ath10k_pci_request_irq_legacy(ar); 2698 case 1: 2699 return ath10k_pci_request_irq_msi(ar); 2700 default: 2701 return ath10k_pci_request_irq_msix(ar); 2702 } 2703 } 2704 2705 static void ath10k_pci_free_irq(struct ath10k *ar) 2706 { 2707 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2708 int i; 2709 2710 /* There's at least one interrupt irregardless whether its legacy INTR 2711 * or 
MSI or MSI-X */ 2712 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) 2713 free_irq(ar_pci->pdev->irq + i, ar); 2714 } 2715 2716 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) 2717 { 2718 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2719 int i; 2720 2721 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); 2722 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, 2723 (unsigned long)ar); 2724 2725 for (i = 0; i < CE_COUNT; i++) { 2726 ar_pci->pipe_info[i].ar_pci = ar_pci; 2727 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet, 2728 (unsigned long)&ar_pci->pipe_info[i]); 2729 } 2730 } 2731 2732 static int ath10k_pci_init_irq(struct ath10k *ar) 2733 { 2734 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2735 int ret; 2736 2737 ath10k_pci_init_irq_tasklets(ar); 2738 2739 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) 2740 ath10k_info(ar, "limiting irq mode to: %d\n", 2741 ath10k_pci_irq_mode); 2742 2743 /* Try MSI-X */ 2744 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) { 2745 ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1; 2746 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, 2747 ar_pci->num_msi_intrs); 2748 if (ret > 0) 2749 return 0; 2750 2751 /* fall-through */ 2752 } 2753 2754 /* Try MSI */ 2755 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { 2756 ar_pci->num_msi_intrs = 1; 2757 ret = pci_enable_msi(ar_pci->pdev); 2758 if (ret == 0) 2759 return 0; 2760 2761 /* fall-through */ 2762 } 2763 2764 /* Try legacy irq 2765 * 2766 * A potential race occurs here: The CORE_BASE write 2767 * depends on target correctly decoding AXI address but 2768 * host won't know when target writes BAR to CORE_CTRL. 2769 * This write might get lost if target has NOT written BAR. 2770 * For now, fix the race by repeating the write in below 2771 * synchronization checking. 
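	 * The repeat happens in ath10k_pci_wait_for_target_init(), which
	 * calls ath10k_pci_enable_legacy_irq() on every polling iteration
	 * until the target reports FW_IND_INITIALIZED (or the wait times
	 * out).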
*/ 2772 ar_pci->num_msi_intrs = 0; 2773 2774 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 2775 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 2776 2777 return 0; 2778 } 2779 2780 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) 2781 { 2782 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 2783 0); 2784 } 2785 2786 static int ath10k_pci_deinit_irq(struct ath10k *ar) 2787 { 2788 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2789 2790 switch (ar_pci->num_msi_intrs) { 2791 case 0: 2792 ath10k_pci_deinit_irq_legacy(ar); 2793 break; 2794 default: 2795 pci_disable_msi(ar_pci->pdev); 2796 break; 2797 } 2798 2799 return 0; 2800 } 2801 2802 static int ath10k_pci_wait_for_target_init(struct ath10k *ar) 2803 { 2804 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2805 unsigned long timeout; 2806 u32 val; 2807 2808 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); 2809 2810 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); 2811 2812 do { 2813 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2814 2815 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", 2816 val); 2817 2818 /* target should never return this */ 2819 if (val == 0xffffffff) 2820 continue; 2821 2822 /* the device has crashed so don't bother trying anymore */ 2823 if (val & FW_IND_EVENT_PENDING) 2824 break; 2825 2826 if (val & FW_IND_INITIALIZED) 2827 break; 2828 2829 if (ar_pci->num_msi_intrs == 0) 2830 /* Fix potential race by repeating CORE_BASE writes */ 2831 ath10k_pci_enable_legacy_irq(ar); 2832 2833 mdelay(10); 2834 } while (time_before(jiffies, timeout)); 2835 2836 ath10k_pci_disable_and_clear_legacy_irq(ar); 2837 ath10k_pci_irq_msi_fw_mask(ar); 2838 2839 if (val == 0xffffffff) { 2840 ath10k_err(ar, "failed to read device register, device is gone\n"); 2841 return -EIO; 2842 } 2843 2844 if (val & FW_IND_EVENT_PENDING) { 2845 ath10k_warn(ar, "device has crashed during init\n"); 2846 return -ECOMM; 2847 } 2848 2849 if (!(val & FW_IND_INITIALIZED)) { 2850 ath10k_err(ar, "failed to receive initialized event from target: %08x\n", 2851 val); 2852 return -ETIMEDOUT; 2853 } 2854 2855 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); 2856 return 0; 2857 } 2858 2859 static int ath10k_pci_cold_reset(struct ath10k *ar) 2860 { 2861 u32 val; 2862 2863 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); 2864 2865 spin_lock_bh(&ar->data_lock); 2866 2867 ar->stats.fw_cold_reset_counter++; 2868 2869 spin_unlock_bh(&ar->data_lock); 2870 2871 /* Put Target, including PCIe, into RESET. */ 2872 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); 2873 val |= 1; 2874 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 2875 2876 /* After writing into SOC_GLOBAL_RESET to put device into 2877 * reset and pulling out of reset pcie may not be stable 2878 * for any immediate pcie register access and cause bus error, 2879 * add delay before any pcie access request to fix this issue. 2880 */ 2881 msleep(20); 2882 2883 /* Pull Target, including PCIe, out of RESET. 
*/ 2884 val &= ~1; 2885 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 2886 2887 msleep(20); 2888 2889 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); 2890 2891 return 0; 2892 } 2893 2894 static int ath10k_pci_claim(struct ath10k *ar) 2895 { 2896 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2897 struct pci_dev *pdev = ar_pci->pdev; 2898 int ret; 2899 2900 pci_set_drvdata(pdev, ar); 2901 2902 ret = pci_enable_device(pdev); 2903 if (ret) { 2904 ath10k_err(ar, "failed to enable pci device: %d\n", ret); 2905 return ret; 2906 } 2907 2908 ret = pci_request_region(pdev, BAR_NUM, "ath"); 2909 if (ret) { 2910 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, 2911 ret); 2912 goto err_device; 2913 } 2914 2915 /* Target expects 32 bit DMA. Enforce it. */ 2916 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2917 if (ret) { 2918 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); 2919 goto err_region; 2920 } 2921 2922 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2923 if (ret) { 2924 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", 2925 ret); 2926 goto err_region; 2927 } 2928 2929 pci_set_master(pdev); 2930 2931 /* Arrange for access to Target SoC registers. */ 2932 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); 2933 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); 2934 if (!ar_pci->mem) { 2935 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); 2936 ret = -EIO; 2937 goto err_master; 2938 } 2939 2940 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); 2941 return 0; 2942 2943 err_master: 2944 pci_clear_master(pdev); 2945 2946 err_region: 2947 pci_release_region(pdev, BAR_NUM); 2948 2949 err_device: 2950 pci_disable_device(pdev); 2951 2952 return ret; 2953 } 2954 2955 static void ath10k_pci_release(struct ath10k *ar) 2956 { 2957 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2958 struct pci_dev *pdev = ar_pci->pdev; 2959 2960 pci_iounmap(pdev, ar_pci->mem); 2961 pci_release_region(pdev, BAR_NUM); 2962 pci_clear_master(pdev); 2963 pci_disable_device(pdev); 2964 } 2965 2966 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) 2967 { 2968 const struct ath10k_pci_supp_chip *supp_chip; 2969 int i; 2970 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); 2971 2972 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { 2973 supp_chip = &ath10k_pci_supp_chips[i]; 2974 2975 if (supp_chip->dev_id == dev_id && 2976 supp_chip->rev_id == rev_id) 2977 return true; 2978 } 2979 2980 return false; 2981 } 2982 2983 static int ath10k_pci_probe(struct pci_dev *pdev, 2984 const struct pci_device_id *pci_dev) 2985 { 2986 int ret = 0; 2987 struct ath10k *ar; 2988 struct ath10k_pci *ar_pci; 2989 enum ath10k_hw_rev hw_rev; 2990 u32 chip_id; 2991 bool pci_ps; 2992 2993 switch (pci_dev->device) { 2994 case QCA988X_2_0_DEVICE_ID: 2995 hw_rev = ATH10K_HW_QCA988X; 2996 pci_ps = false; 2997 break; 2998 case QCA6164_2_1_DEVICE_ID: 2999 case QCA6174_2_1_DEVICE_ID: 3000 hw_rev = ATH10K_HW_QCA6174; 3001 pci_ps = true; 3002 break; 3003 case QCA99X0_2_0_DEVICE_ID: 3004 hw_rev = ATH10K_HW_QCA99X0; 3005 pci_ps = false; 3006 break; 3007 case QCA9377_1_0_DEVICE_ID: 3008 hw_rev = ATH10K_HW_QCA9377; 3009 pci_ps = true; 3010 break; 3011 default: 3012 WARN_ON(1); 3013 return -ENOTSUPP; 3014 } 3015 3016 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, 3017 hw_rev, &ath10k_pci_hif_ops); 3018 if (!ar) { 3019 dev_err(&pdev->dev, "failed to allocate core\n"); 3020 return -ENOMEM; 3021 } 3022 3023 ath10k_dbg(ar, 
ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", 3024 pdev->vendor, pdev->device, 3025 pdev->subsystem_vendor, pdev->subsystem_device); 3026 3027 ar_pci = ath10k_pci_priv(ar); 3028 ar_pci->pdev = pdev; 3029 ar_pci->dev = &pdev->dev; 3030 ar_pci->ar = ar; 3031 ar->dev_id = pci_dev->device; 3032 ar_pci->pci_ps = pci_ps; 3033 3034 ar->id.vendor = pdev->vendor; 3035 ar->id.device = pdev->device; 3036 ar->id.subsystem_vendor = pdev->subsystem_vendor; 3037 ar->id.subsystem_device = pdev->subsystem_device; 3038 3039 spin_lock_init(&ar_pci->ce_lock); 3040 spin_lock_init(&ar_pci->ps_lock); 3041 3042 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 3043 (unsigned long)ar); 3044 setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, 3045 (unsigned long)ar); 3046 3047 ret = ath10k_pci_claim(ar); 3048 if (ret) { 3049 ath10k_err(ar, "failed to claim device: %d\n", ret); 3050 goto err_core_destroy; 3051 } 3052 3053 if (QCA_REV_6174(ar)) 3054 ath10k_pci_override_ce_config(ar); 3055 3056 ret = ath10k_pci_alloc_pipes(ar); 3057 if (ret) { 3058 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3059 ret); 3060 goto err_sleep; 3061 } 3062 3063 ath10k_pci_ce_deinit(ar); 3064 ath10k_pci_irq_disable(ar); 3065 3066 if (ar_pci->pci_ps == 0) { 3067 ret = ath10k_pci_force_wake(ar); 3068 if (ret) { 3069 ath10k_warn(ar, "failed to wake up device : %d\n", ret); 3070 goto err_free_pipes; 3071 } 3072 } 3073 3074 ret = ath10k_pci_init_irq(ar); 3075 if (ret) { 3076 ath10k_err(ar, "failed to init irqs: %d\n", ret); 3077 goto err_free_pipes; 3078 } 3079 3080 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", 3081 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, 3082 ath10k_pci_irq_mode, ath10k_pci_reset_mode); 3083 3084 ret = ath10k_pci_request_irq(ar); 3085 if (ret) { 3086 ath10k_warn(ar, "failed to request irqs: %d\n", ret); 3087 goto err_deinit_irq; 3088 } 3089 3090 ret = ath10k_pci_chip_reset(ar); 3091 if (ret) { 3092 ath10k_err(ar, "failed to reset chip: %d\n", ret); 3093 goto err_free_irq; 3094 } 3095 3096 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); 3097 if (chip_id == 0xffffffff) { 3098 ath10k_err(ar, "failed to get chip id\n"); 3099 goto err_free_irq; 3100 } 3101 3102 if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { 3103 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", 3104 pdev->device, chip_id); 3105 goto err_free_irq; 3106 } 3107 3108 ret = ath10k_core_register(ar, chip_id); 3109 if (ret) { 3110 ath10k_err(ar, "failed to register driver core: %d\n", ret); 3111 goto err_free_irq; 3112 } 3113 3114 return 0; 3115 3116 err_free_irq: 3117 ath10k_pci_free_irq(ar); 3118 ath10k_pci_kill_tasklet(ar); 3119 3120 err_deinit_irq: 3121 ath10k_pci_deinit_irq(ar); 3122 3123 err_free_pipes: 3124 ath10k_pci_free_pipes(ar); 3125 3126 err_sleep: 3127 ath10k_pci_sleep_sync(ar); 3128 ath10k_pci_release(ar); 3129 3130 err_core_destroy: 3131 ath10k_core_destroy(ar); 3132 3133 return ret; 3134 } 3135 3136 static void ath10k_pci_remove(struct pci_dev *pdev) 3137 { 3138 struct ath10k *ar = pci_get_drvdata(pdev); 3139 struct ath10k_pci *ar_pci; 3140 3141 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); 3142 3143 if (!ar) 3144 return; 3145 3146 ar_pci = ath10k_pci_priv(ar); 3147 3148 if (!ar_pci) 3149 return; 3150 3151 ath10k_core_unregister(ar); 3152 ath10k_pci_free_irq(ar); 3153 ath10k_pci_kill_tasklet(ar); 3154 ath10k_pci_deinit_irq(ar); 3155 ath10k_pci_ce_deinit(ar); 3156 ath10k_pci_free_pipes(ar); 3157 ath10k_pci_sleep_sync(ar); 3158 
ath10k_pci_release(ar); 3159 ath10k_core_destroy(ar); 3160 } 3161 3162 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); 3163 3164 static struct pci_driver ath10k_pci_driver = { 3165 .name = "ath10k_pci", 3166 .id_table = ath10k_pci_id_table, 3167 .probe = ath10k_pci_probe, 3168 .remove = ath10k_pci_remove, 3169 }; 3170 3171 static int __init ath10k_pci_init(void) 3172 { 3173 int ret; 3174 3175 ret = pci_register_driver(&ath10k_pci_driver); 3176 if (ret) 3177 printk(KERN_ERR "failed to register ath10k pci driver: %d\n", 3178 ret); 3179 3180 return ret; 3181 } 3182 module_init(ath10k_pci_init); 3183 3184 static void __exit ath10k_pci_exit(void) 3185 { 3186 pci_unregister_driver(&ath10k_pci_driver); 3187 } 3188 3189 module_exit(ath10k_pci_exit); 3190 3191 MODULE_AUTHOR("Qualcomm Atheros"); 3192 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); 3193 MODULE_LICENSE("Dual BSD/GPL"); 3194 3195 /* QCA988x 2.0 firmware files */ 3196 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); 3197 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); 3198 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); 3199 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3200 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3201 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); 3202 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3203 3204 /* QCA6174 2.1 firmware files */ 3205 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); 3206 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); 3207 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE); 3208 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3209 3210 /* QCA6174 3.1 firmware files */ 3211 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3212 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3213 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); 3214 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3215 3216 /* QCA9377 1.0 firmware files */ 3217 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3218 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE); 3219