// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

#include "common.h"

int
_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
{
	const int interval = 10; /* microseconds */
	int t = 0;

	do {
		if ((_il_rd(il, addr) & mask) == (bits & mask))
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(_il_poll_bit);

void
il_set_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_set_bit);

void
il_clear_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_clear_bit);

bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 */
	ret = _il_poll_bit(il, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			     "(CSR_GP_CNTRL 0x%08x)\n", val);
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);

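/*
 * Added usage note: callers hold il->reg_lock around the grab/release pair,
 * as the helpers below (e.g. il_rd_prph()) do:
 *
 *	spin_lock_irqsave(&il->reg_lock, reg_flags);
 *	if (likely(_il_grab_nic_access(il))) {
 *		val = _il_rd_prph(il, reg);
 *		_il_release_nic_access(il);
 *	}
 *	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
 */
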
int
il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
{
	const int interval = 10; /* microseconds */
	int t = 0;

	do {
		if ((il_rd(il, addr) & mask) == mask)
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(il_poll_bit);

u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}
EXPORT_SYMBOL(il_rd_prph);

void
il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		_il_wr_prph(il, addr, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_wr_prph);

u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);

	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}
EXPORT_SYMBOL(il_read_targ_mem);

void
il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
		_il_wr(il, HBUS_TARG_MEM_WDAT, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_write_targ_mem);

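/*
 * Added note: the two helpers above implement indirect device-SRAM access;
 * the target address is latched by writing HBUS_TARG_MEM_RADDR (or _WADDR),
 * and the data is then transferred through HBUS_TARG_MEM_RDAT (or _WDAT).
 */
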
const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";
	}
}
EXPORT_SYMBOL(il_get_cmd_string);

#define HOST_COMPLETE_TIMEOUT (HZ / 2)

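/*
 * Added note: HZ jiffies span one second, so the timeout above is 500 ms
 * regardless of the kernel's CONFIG_HZ setting.
 */
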
363 */ 364 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; 365 } 366 fail: 367 if (cmd->reply_page) { 368 il_free_pages(il, cmd->reply_page); 369 cmd->reply_page = 0; 370 } 371 out: 372 return ret; 373 } 374 EXPORT_SYMBOL(il_send_cmd_sync); 375 376 int 377 il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd) 378 { 379 if (cmd->flags & CMD_ASYNC) 380 return il_send_cmd_async(il, cmd); 381 382 return il_send_cmd_sync(il, cmd); 383 } 384 EXPORT_SYMBOL(il_send_cmd); 385 386 int 387 il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data) 388 { 389 struct il_host_cmd cmd = { 390 .id = id, 391 .len = len, 392 .data = data, 393 }; 394 395 return il_send_cmd_sync(il, &cmd); 396 } 397 EXPORT_SYMBOL(il_send_cmd_pdu); 398 399 int 400 il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, 401 void (*callback) (struct il_priv *il, 402 struct il_device_cmd *cmd, 403 struct il_rx_pkt *pkt)) 404 { 405 struct il_host_cmd cmd = { 406 .id = id, 407 .len = len, 408 .data = data, 409 }; 410 411 cmd.flags |= CMD_ASYNC; 412 cmd.callback = callback; 413 414 return il_send_cmd_async(il, &cmd); 415 } 416 EXPORT_SYMBOL(il_send_cmd_pdu_async); 417 418 /* default: IL_LED_BLINK(0) using blinking idx table */ 419 static int led_mode; 420 module_param(led_mode, int, 0444); 421 MODULE_PARM_DESC(led_mode, 422 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); 423 424 /* Throughput OFF time(ms) ON time (ms) 425 * >300 25 25 426 * >200 to 300 40 40 427 * >100 to 200 55 55 428 * >70 to 100 65 65 429 * >50 to 70 75 75 430 * >20 to 50 85 85 431 * >10 to 20 95 95 432 * >5 to 10 110 110 433 * >1 to 5 130 130 434 * >0 to 1 167 167 435 * <=0 SOLID ON 436 */ 437 static const struct ieee80211_tpt_blink il_blink[] = { 438 {.throughput = 0, .blink_time = 334}, 439 {.throughput = 1 * 1024 - 1, .blink_time = 260}, 440 {.throughput = 5 * 1024 - 1, .blink_time = 220}, 441 {.throughput = 10 * 1024 - 1, .blink_time = 190}, 442 {.throughput = 20 * 1024 - 1, .blink_time = 170}, 443 {.throughput = 50 * 1024 - 1, .blink_time = 150}, 444 {.throughput = 70 * 1024 - 1, .blink_time = 130}, 445 {.throughput = 100 * 1024 - 1, .blink_time = 110}, 446 {.throughput = 200 * 1024 - 1, .blink_time = 80}, 447 {.throughput = 300 * 1024 - 1, .blink_time = 50}, 448 }; 449 450 /* 451 * Adjust led blink rate to compensate on a MAC Clock difference on every HW 452 * Led blink rate analysis showed an average deviation of 0% on 3945, 453 * 5% on 4965 HW. 
int
il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return il_send_cmd_async(il, cmd);

	return il_send_cmd_sync(il, cmd);
}
EXPORT_SYMBOL(il_send_cmd);

int
il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return il_send_cmd_sync(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu);

int
il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
		      void (*callback) (struct il_priv *il,
					struct il_device_cmd *cmd,
					struct il_rx_pkt *pkt))
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	cmd.flags |= CMD_ASYNC;
	cmd.callback = callback;

	return il_send_cmd_async(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu_async);

/* default: IL_LED_BLINK(0) using blinking idx table */
static int led_mode;
module_param(led_mode, int, 0444);
MODULE_PARM_DESC(led_mode,
		 "0=system default, 1=On(RF On)/Off(RF Off), 2=blinking");

/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0				SOLID ON
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0, .blink_time = 334},
	{.throughput = 1 * 1024 - 1, .blink_time = 260},
	{.throughput = 5 * 1024 - 1, .blink_time = 220},
	{.throughput = 10 * 1024 - 1, .blink_time = 190},
	{.throughput = 20 * 1024 - 1, .blink_time = 170},
	{.throughput = 50 * 1024 - 1, .blink_time = 150},
	{.throughput = 70 * 1024 - 1, .blink_time = 130},
	{.throughput = 100 * 1024 - 1, .blink_time = 110},
	{.throughput = 200 * 1024 - 1, .blink_time = 80},
	{.throughput = 300 * 1024 - 1, .blink_time = 50},
};

/*
 * Adjust LED blink rate to compensate for the MAC clock frequency deviation
 * on each HW type.  LED blink-rate analysis showed an average deviation of
 * 0% on 3945 and 5% on 4965 HW.
 * The on/off time per HW is adjusted according to that deviation to achieve
 * the desired LED frequency.  The calculation is:
 *	(100 - averageDeviation) / 100 * blinkTime
 * For code efficiency the calculation is done as:
 *	compensation = (100 - averageDeviation) * 64 / 100
 *	NewBlinkTime = (compensation * BlinkTime) / 64
 */
static inline u8
il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
{
	if (!compensation) {
		IL_ERR("undefined blink compensation: "
		       "use pre-defined blinking time\n");
		return time;
	}

	return (u8) ((time * compensation) >> 6);
}

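/*
 * Worked example (values illustrative, not from any specific cfg): each
 * il_blink entry's blink_time splits into equal on/off halves, matching the
 * OFF/ON table above (e.g. 334 ms -> 167 ms on / 167 ms off).  For
 * il_blink_compensation(), time == 100 with compensation == 64 yields
 * (100 * 64) >> 6 == 100 (no adjustment), while compensation == 51 yields
 * (100 * 51) >> 6 == 79, scaling the on/off time to roughly 80% of nominal.
 */
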
/* Set led pattern command */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n", il->cfg->led_compensation);
	led_cmd.on = il_blink_compensation(il, on, il->cfg->led_compensation);
	led_cmd.off = il_blink_compensation(il, off, il->cfg->led_compensation);

	ret = il->ops->send_led_cmd(il, &led_cmd);
	if (!ret) {
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}

static void
il_led_brightness_set(struct led_classdev *led_cdev,
		      enum led_brightness brightness)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
	unsigned long on = 0;

	if (brightness > 0)
		on = IL_LED_SOLID;

	il_led_cmd(il, on, 0);
}

static int
il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
		 unsigned long *delay_off)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);

	return il_led_cmd(il, *delay_on, *delay_off);
}

void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);

void
il_leds_exit(struct il_priv *il)
{
	if (!il->led_registered)
		return;

	led_classdev_unregister(&il->led);
	kfree(il->led.name);
}
EXPORT_SYMBOL(il_leds_exit);

/************************** EEPROM BANDS ****************************
 *
 * The il_eeprom_band definitions below provide the mapping from the
 * EEPROM contents to the specific channel number supported for each
 * band.
 *
 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
 * The specific geography and calibration information for that channel
 * is contained in the eeprom map itself.
 *
 * During init, we copy the eeprom information and channel map
 * information into il->channel_info_24/52 and il->channel_map_24/52
 *
 * channel_map_24/52 provides the idx in the channel_info array for a
 * given channel.  We have to have two separate maps as there is channel
 * overlap between the 2.4GHz and 5.2GHz spectrum, as seen in band_1 and
 * band_2.
 *
 * A value of 0xff stored in the channel_map indicates that the channel
 * is not supported by the hardware at all.
 *
 * A value of 0xfe in the channel_map indicates that the channel is not
 * valid for Tx with the current hardware.  This means that
 * while the system can tune and receive on a given channel, it may not
 * be able to associate or transmit any frames on that
 * channel.  There is no corresponding channel information for that
 * entry.
 *
 *********************************************************************/

/* 2.4 GHz */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};

/******************************************************************************
 *
 * EEPROM related functions
 *
 ******************************************************************************/

static int
il_eeprom_verify_signature(struct il_priv *il)
{
	u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
	int ret = 0;

	D_EEPROM("EEPROM signature=0x%08x\n", gp);
	switch (gp) {
	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
		break;
	default:
		IL_ERR("bad EEPROM signature, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		break;
	}
	return ret;
}

const u8 *
il_eeprom_query_addr(const struct il_priv *il, size_t offset)
{
	BUG_ON(offset >= il->cfg->eeprom_size);
	return &il->eeprom[offset];
}
EXPORT_SYMBOL(il_eeprom_query_addr);

u16
il_eeprom_query16(const struct il_priv *il, size_t offset)
{
	if (!il->eeprom)
		return 0;
	return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
}
EXPORT_SYMBOL(il_eeprom_query16);

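/*
 * Worked example: the EEPROM image is little-endian, so with
 * il->eeprom[offset] == 0x34 and il->eeprom[offset + 1] == 0x12,
 * il_eeprom_query16() returns 0x1234.
 */
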
/*
 * il_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into il->eeprom
 *
 * NOTE:  This routine uses the non-debug IO access functions.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	int addr;

	/* allocate eeprom */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom)
		return -ENOMEM;

	e = (__le16 *) il->eeprom;

	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16-bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret = _il_poll_bit(il, CSR_EEPROM_REG,
				   CSR_EEPROM_REG_READ_VALID_MSK,
				   CSR_EEPROM_REG_READ_VALID_MSK,
				   IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* Reset chip to save power until we load uCode during "up". */
	il_apm_stop(il);
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);

void
il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);

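/*
 * Added note on the read handshake in il_eeprom_init() above: the byte
 * offset is shifted left by one into CSR_EEPROM_REG's address field, the
 * driver polls CSR_EEPROM_REG_READ_VALID_MSK for completion, and each
 * 16-bit word arrives in the upper half of CSR_EEPROM_REG (hence "r >> 16").
 */
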
static void
il_init_band_reference(const struct il_priv *il, int eep_band,
		       int *eeprom_ch_count,
		       const struct il_eeprom_channel **eeprom_ch_info,
		       const u8 **eeprom_ch_idx)
{
	u32 offset = il->cfg->regulatory_bands[eep_band - 1];

	switch (eep_band) {
	case 1:		/* 2.4GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_1;
		break;
	case 2:		/* 4.9GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_2;
		break;
	case 3:		/* 5.2GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_3;
		break;
	case 4:		/* 5.5GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_4;
		break;
	case 5:		/* 5.7GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_5;
		break;
	case 6:		/* 2.4GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_6;
		break;
	case 7:		/* 5 GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_7;
		break;
	default:
		BUG();
	}
}

#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/*
 * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
 *
 * Does not set up a command, or touch hardware.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}

#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			      ? # x " " : "")

/*
 * il_init_channel_map - Set up driver's info for all possible channels
 */
int
il_init_channel_map(struct il_priv *il)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_idx = NULL;
	const struct il_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct il_channel_info *ch_info;

	if (il->channel_count) {
		D_EEPROM("Channel map already initialized.\n");
		return 0;
	}

	D_EEPROM("Initializing regulatory info from EEPROM\n");

	il->channel_count =
	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
	    ARRAY_SIZE(il_eeprom_band_5);

	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);

	il->channel_info =
	    kcalloc(il->channel_count, sizeof(struct il_channel_info),
		    GFP_KERNEL);
	if (!il->channel_info) {
		IL_ERR("Could not allocate channel_info\n");
		il->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = il->channel_info;

	/* Loop through the 5 EEPROM bands adding them in order to the
	 * channel map we maintain (which contains additional information
	 * beyond what is in the EEPROM) */
	for (band = 1; band <= 5; band++) {

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_idx[ch];
			ch_info->band =
			    (band == 1) ? NL80211_BAND_2GHZ :
			    NL80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 * and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then
			 * enable one by one */
			ch_info->ht40_extension_channel =
			    IEEE80211_CHAN_NO_HT40;

			if (!(il_is_channel_valid(ch_info))) {
				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n", ch_info->channel,
					 ch_info->flags,
					 il_is_channel_a_band(ch_info) ?
					 "5.2" : "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			D_EEPROM("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):"
				 " Ad-Hoc %ssupported\n", ch_info->channel,
				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(DFS),
				 eeprom_ch_info[ch].flags,
				 eeprom_ch_info[ch].max_power_avg,
				 ((eeprom_ch_info[ch].flags &
				   EEPROM_CHANNEL_IBSS) &&
				  !(eeprom_ch_info[ch].flags &
				    EEPROM_CHANNEL_RADAR)) ? "" : "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
	    il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum nl80211_band ieeeband;

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
		    (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(il_init_channel_map);

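/*
 * Added note: for each HT40-capable EEPROM channel N, the loop above calls
 * il_mod_ht40_chan_info() twice: once for N, clearing
 * IEEE80211_CHAN_NO_HT40PLUS (N may use the channel above as extension),
 * and once for N + 4, clearing IEEE80211_CHAN_NO_HT40MINUS (N + 4 may
 * extend below), e.g. the 36/40 pair.
 */
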
/*
 * il_free_channel_map - undo allocations in il_init_channel_map
 */
void
il_free_channel_map(struct il_priv *il)
{
	kfree(il->channel_info);
	il->channel_count = 0;
}
EXPORT_SYMBOL(il_free_channel_map);

/*
 * il_get_channel_info - Find driver's private channel info
 *
 * Based on band and channel number.
 */
const struct il_channel_info *
il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
		    u16 channel)
{
	int i;

	switch (band) {
	case NL80211_BAND_5GHZ:
		for (i = 14; i < il->channel_count; i++) {
			if (il->channel_info[i].channel == channel)
				return &il->channel_info[i];
		}
		break;
	case NL80211_BAND_2GHZ:
		if (channel >= 1 && channel <= 14)
			return &il->channel_info[channel - 1];
		break;
	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL(il_get_channel_info);

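/*
 * Added note: the lookup above relies on the layout built in
 * il_init_channel_map(): 2.4 GHz channels 1-14 occupy idx 0-13 directly,
 * while 5 GHz channels start at idx 14 and are found by linear search.
 */
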
/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211.
 */

#define SLP_VEC(X0, X1, X2, X3, X4) {	\
	cpu_to_le32(X0),		\
	cpu_to_le32(X1),		\
	cpu_to_le32(X2),		\
	cpu_to_le32(X3),		\
	cpu_to_le32(X4)			\
}

static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	static const __le32 interval[3][IL_POWER_VEC_SIZE] = {
		SLP_VEC(2, 2, 4, 6, 0xFF),
		SLP_VEC(2, 4, 7, 10, 10),
		SLP_VEC(4, 7, 10, 10, 0xFF)
	};
	int i, dtim_period, no_dtim;
	u32 max_sleep;
	bool skip;

	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	/* if no Power Save, we are done */
	if (il->power_data.ps_disabled)
		return;

	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->keep_alive_seconds = 0;
	cmd->debug_flags = 0;
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->keep_alive_beacons = 0;

	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;

	if (dtim_period <= 2) {
		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
		no_dtim = 2;
	} else if (dtim_period <= 10) {
		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
		no_dtim = 2;
	} else {
		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
		no_dtim = 0;
	}

	if (dtim_period == 0) {
		dtim_period = 1;
		skip = false;
	} else {
		skip = !!no_dtim;
	}

	if (skip) {
		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];

		max_sleep = le32_to_cpu(tmp);
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * (skip + 1);
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		max_sleep = dtim_period;
		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}

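/*
 * Worked example: with dtim_period == 3 the second interval table
 * {2, 4, 7, 10, 10} is chosen and skip is true.  The last entry (10) is
 * not 0xFF and exceeds the DTIM period, so max_sleep = (10 / 3) * 3 = 9,
 * and the clamp loop rewrites the vector to {2, 4, 7, 9, 9}.
 */
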
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}

static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd,
		  bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* On scan complete, sleep_cmd_next is applied, so keep it updated */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}

int
il_power_update_mode(struct il_priv *il, bool force)
{
	struct il_powertable_cmd cmd;

	il_build_powertable_cmd(il, &cmd);

	return il_power_set_mode(il, &cmd, force);
}
EXPORT_SYMBOL(il_power_update_mode);

/* initialize to default */
void
il_power_initialize(struct il_priv *il)
{
	u16 lctl;

	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
	il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	il->power_data.debug_sleep_level_override = -1;

	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
}
EXPORT_SYMBOL(il_power_initialize);

/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
 * sending probe req.  This should be set long enough to hear probe responses
 * from more than one AP. */
#define IL_ACTIVE_DWELL_TIME_24 (30)	/* all times in msec */
#define IL_ACTIVE_DWELL_TIME_52 (20)

#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
 * Must be set longer than active dwell time.
 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
#define IL_PASSIVE_DWELL_TIME_24 (20)	/* all times in msec */
#define IL_PASSIVE_DWELL_TIME_52 (10)
#define IL_PASSIVE_DWELL_BASE (100)
#define IL_CHANNEL_TUNE_TIME 5

static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when the device is not ready to
	 * receive the scan abort command or is not currently performing
	 * a hardware scan */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan, which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}

static void
il_complete_scan(struct il_priv *il, bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};

	/* check if scan was requested from mac80211 */
	if (il->scan_request) {
		D_SCAN("Complete scan in mac80211\n");
		ieee80211_scan_completed(il->hw, &info);
	}

	il->scan_vif = NULL;
	il->scan_request = NULL;
}

void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}

static void
il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully sent scan abort\n");
}

/*
 * il_scan_cancel - Cancel any currently executing HW scan
 */
int
il_scan_cancel(struct il_priv *il)
{
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
	return 0;
}
EXPORT_SYMBOL(il_scan_cancel);

/*
 * il_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 */
int
il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&il->mutex);

	D_SCAN("Scan cancel timeout\n");

	il_do_scan_abort(il);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(S_SCAN_HW, &il->status))
			break;
		msleep(20);
	}

	return test_bit(S_SCAN_HW, &il->status);
}
EXPORT_SYMBOL(il_scan_cancel_timeout);

/* Service response to C_SCAN (0x80) */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}

/* Service N_SCAN_START (0x82) */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;

	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: %d [802.11%s] (TSF: 0x%08X:%08X) - %d "
	       "(beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status,
	       notif->beacon_timer);
}

/* Service N_SCAN_RESULTS (0x83) */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: %d [802.11%s] (TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}

/* Service N_SCAN_COMPLETE (0x84) */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}

void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);

u16
il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
			 u8 n_probes)
{
	if (band == NL80211_BAND_5GHZ)
		return IL_ACTIVE_DWELL_TIME_52 +
		    IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
	else
		return IL_ACTIVE_DWELL_TIME_24 +
		    IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
EXPORT_SYMBOL(il_get_active_dwell_time);

u16
il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
			  struct ieee80211_vif *vif)
{
	u16 value;

	u16 passive =
	    (band == NL80211_BAND_2GHZ) ?
	    IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_24 :
	    IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);

void
il_init_scan_params(struct il_priv *il)
{
	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;

	if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
		il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
	if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
		il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);

static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}

int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Cannot scan: no channels given.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);

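/*
 * Added overview (assumption: the ops->request_scan() implementation sets
 * S_SCAN_HW once the scan command is accepted): il_scan_initiate() sets
 * S_SCANNING and arms the scan_check watchdog; il_hdl_scan_complete()
 * clears S_SCAN_HW when N_SCAN_COMPLETE arrives, and S_SCANNING is cleared
 * in il_bg_scan_completed() below (or by il_force_scan_end() on failure).
 */
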
static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* If we get here, the firmware did not finish the scan and is
	 * most likely in a bad shape, so don't bother sending an abort
	 * command; just force scan complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}

/*
 * il_fill_probe_req - fill in all required fields and IE for probe request
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);

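/*
 * Added note on the frame layout built above: the 24-byte management
 * header (broadcast DA/BSSID, TA as SA) is followed by a two-byte wildcard
 * SSID IE (WLAN_EID_SSID with zero length), then the caller-supplied IEs;
 * @left must therefore be at least 24 + 2 + ie_len for the full probe
 * request to fit.
 */
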
static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep scan_check work queued in case the firmware does not
	 * report back a scan-completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}

static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending;
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}

void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);

void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);

/* il->sta_lock must be held */
static void
il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
{
	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
		       sta_id, il->stations[sta_id].sta.sta.addr);

	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
		D_ASSOC("STA id %u addr %pM already present "
			"in uCode (according to driver)\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	} else {
		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	}
}

static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n",
		       sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n",
			pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
	       addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}

static void
il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
		    struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);
}

int
il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
{
	struct il_rx_pkt *pkt = NULL;
	int ret = 0;
	u8 data[sizeof(*sta)];
	struct il_host_cmd cmd = {
		.id = C_ADD_STA,
		.flags = flags,
		.data = data,
	};
	u8 sta_id __maybe_unused = sta->sta.sta_id;

	D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
	       flags & CMD_ASYNC ? "a" : "");

	if (flags & CMD_ASYNC)
		cmd.callback = il_add_sta_callback;
	else {
		cmd.flags |= CMD_WANT_SKB;
		might_sleep();
	}

	cmd.len = il->ops->build_addsta_hcmd(sta, data);
	ret = il_send_cmd(il, &cmd);
	if (ret)
		return ret;
	if (flags & CMD_ASYNC)
		return 0;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	ret = il_process_add_sta_resp(il, sta, pkt, true);

	il_free_pages(il, cmd.reply_page);

	return ret;
}
EXPORT_SYMBOL(il_send_add_sta);

static void
il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap;
	__le32 sta_flags;

	if (!sta || !sta_ht_inf->ht_supported)
		goto done;

	D_ASSOC("spatial multiplexing power save mode: %s\n",
		(sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
		(sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ?
		"dynamic" : "disabled");

	sta_flags = il->stations[idx].sta.station_flags;

	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);

	switch (sta->deflink.smps_mode) {
	case IEEE80211_SMPS_STATIC:
		sta_flags |= STA_FLG_MIMO_DIS_MSK;
		break;
	case IEEE80211_SMPS_DYNAMIC:
		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
		break;
	case IEEE80211_SMPS_OFF:
		break;
	default:
		IL_WARN("Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
		break;
	}

	sta_flags |=
	    cpu_to_le32((u32) sta_ht_inf->ampdu_factor <<
			STA_FLG_MAX_AGG_SIZE_POS);

	sta_flags |=
	    cpu_to_le32((u32) sta_ht_inf->ampdu_density <<
			STA_FLG_AGG_MPDU_DENSITY_POS);

	if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
		sta_flags |= STA_FLG_HT40_EN_MSK;
	else
		sta_flags &= ~STA_FLG_HT40_EN_MSK;

	il->stations[idx].sta.station_flags = sta_flags;
done:
	return;
}

/*
 * il_prep_station - Prepare station information for addition
 *
 * should be called with sta_lock held
 */
u8
il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
		struct ieee80211_sta *sta)
{
	struct il_station_entry *station;
	int i;
	u8 sta_id = IL_INVALID_STATION;
	u16 rate;

	if (is_ap)
		sta_id = IL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		sta_id = il->hw_params.bcast_id;
	else
		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
			if (ether_addr_equal(il->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			if (!il->stations[i].used &&
			    sta_id == IL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		return sta_id;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		return sta_id;
	}

	station = &il->stations[sta_id];
	station->used = IL_STA_DRIVER_ACTIVE;
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
	il->num_stations++;

	/* Set up the C_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = 0;

	/*
	 * OK to call unconditionally, since local stations (IBSS BSSID
	 * STA and broadcast STA) pass in a NULL sta, and mac80211
	 * doesn't allow HT IBSS.
	 */
	il_set_ht_add_station(il, sta_id, sta);

	/* 3945 only */
	rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;
}
EXPORT_SYMBOL_GPL(il_prep_station);

#define STA_WAIT_TIMEOUT (HZ/2)

/*
 * il_add_station_common -
 */
int
il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
		      struct ieee80211_sta *sta, u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct il_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	sta_id = il_prep_station(il, addr, is_ap, sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
	if (ret) {
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		IL_ERR("Adding station %pM failed.\n",
		       il->stations[sta_id].sta.sta.addr);
		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(il_add_station_common);

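/*
 * Added implementation notes: il_prep_station() above only marks a slot
 * IL_STA_DRIVER_ACTIVE; il_add_station_common() then sets
 * IL_STA_UCODE_INPROGRESS while C_ADD_STA is in flight, and
 * il_sta_ucode_activate() flips the slot to IL_STA_UCODE_ACTIVE once the
 * uCode acknowledges it.  Note also that the prepared command is copied to
 * a stack-local il_addsta_cmd so sta_lock (a spinlock) can be dropped
 * before the sleeping il_send_add_sta() call; il_restore_stations() below
 * uses the same pattern.
 */
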
2070 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != 2071 IL_STA_UCODE_ACTIVE) 2072 IL_ERR("removed non active STA %u\n", sta_id); 2073 2074 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; 2075 2076 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); 2077 D_ASSOC("Removed STA %u\n", sta_id); 2078 } 2079 2080 static int 2081 il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, 2082 bool temporary) 2083 { 2084 struct il_rx_pkt *pkt; 2085 int ret; 2086 2087 unsigned long flags_spin; 2088 struct il_rem_sta_cmd rm_sta_cmd; 2089 2090 struct il_host_cmd cmd = { 2091 .id = C_REM_STA, 2092 .len = sizeof(struct il_rem_sta_cmd), 2093 .flags = CMD_SYNC, 2094 .data = &rm_sta_cmd, 2095 }; 2096 2097 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 2098 rm_sta_cmd.num_sta = 1; 2099 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); 2100 2101 cmd.flags |= CMD_WANT_SKB; 2102 2103 ret = il_send_cmd(il, &cmd); 2104 2105 if (ret) 2106 return ret; 2107 2108 pkt = (struct il_rx_pkt *)cmd.reply_page; 2109 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 2110 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); 2111 ret = -EIO; 2112 } 2113 2114 if (!ret) { 2115 switch (pkt->u.rem_sta.status) { 2116 case REM_STA_SUCCESS_MSK: 2117 if (!temporary) { 2118 spin_lock_irqsave(&il->sta_lock, flags_spin); 2119 il_sta_ucode_deactivate(il, sta_id); 2120 spin_unlock_irqrestore(&il->sta_lock, 2121 flags_spin); 2122 } 2123 D_ASSOC("C_REM_STA PASSED\n"); 2124 break; 2125 default: 2126 ret = -EIO; 2127 IL_ERR("C_REM_STA failed\n"); 2128 break; 2129 } 2130 } 2131 il_free_pages(il, cmd.reply_page); 2132 2133 return ret; 2134 } 2135 2136 /* 2137 * il_remove_station - Remove driver's knowledge of station. 2138 */ 2139 int 2140 il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) 2141 { 2142 unsigned long flags; 2143 2144 if (!il_is_ready(il)) { 2145 D_INFO("Unable to remove station %pM, device not ready.\n", 2146 addr); 2147 /* 2148 * It is typical for stations to be removed when we are 2149 * going down. Return success since device will be down 2150 * soon anyway 2151 */ 2152 return 0; 2153 } 2154 2155 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr); 2156 2157 if (WARN_ON(sta_id == IL_INVALID_STATION)) 2158 return -EINVAL; 2159 2160 spin_lock_irqsave(&il->sta_lock, flags); 2161 2162 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2163 D_INFO("Removing %pM but non DRIVER active\n", addr); 2164 goto out_err; 2165 } 2166 2167 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2168 D_INFO("Removing %pM but non UCODE active\n", addr); 2169 goto out_err; 2170 } 2171 2172 if (il->stations[sta_id].used & IL_STA_LOCAL) { 2173 kfree(il->stations[sta_id].lq); 2174 il->stations[sta_id].lq = NULL; 2175 } 2176 2177 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2178 2179 il->num_stations--; 2180 2181 BUG_ON(il->num_stations < 0); 2182 2183 spin_unlock_irqrestore(&il->sta_lock, flags); 2184 2185 return il_send_remove_station(il, addr, sta_id, false); 2186 out_err: 2187 spin_unlock_irqrestore(&il->sta_lock, flags); 2188 return -EINVAL; 2189 } 2190 EXPORT_SYMBOL_GPL(il_remove_station); 2191 2192 /* 2193 * il_clear_ucode_stations - clear ucode station table bits 2194 * 2195 * This function clears all the bits in the driver indicating 2196 * which stations are active in the ucode. Call when something 2197 * other than explicit station management would cause this in 2198 * the ucode, e.g. unassociated RXON. 
2199 */ 2200 void 2201 il_clear_ucode_stations(struct il_priv *il) 2202 { 2203 int i; 2204 unsigned long flags_spin; 2205 bool cleared = false; 2206 2207 D_INFO("Clearing ucode stations in driver\n"); 2208 2209 spin_lock_irqsave(&il->sta_lock, flags_spin); 2210 for (i = 0; i < il->hw_params.max_stations; i++) { 2211 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { 2212 D_INFO("Clearing ucode active for station %d\n", i); 2213 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2214 cleared = true; 2215 } 2216 } 2217 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2218 2219 if (!cleared) 2220 D_INFO("No active stations found to be cleared\n"); 2221 } 2222 EXPORT_SYMBOL(il_clear_ucode_stations); 2223 2224 /* 2225 * il_restore_stations() - Restore driver known stations to device 2226 * 2227 * All stations considered active by driver, but not present in ucode, is 2228 * restored. 2229 * 2230 * Function sleeps. 2231 */ 2232 void 2233 il_restore_stations(struct il_priv *il) 2234 { 2235 struct il_addsta_cmd sta_cmd; 2236 struct il_link_quality_cmd lq; 2237 unsigned long flags_spin; 2238 int i; 2239 bool found = false; 2240 int ret; 2241 bool send_lq; 2242 2243 if (!il_is_ready(il)) { 2244 D_INFO("Not ready yet, not restoring any stations.\n"); 2245 return; 2246 } 2247 2248 D_ASSOC("Restoring all known stations ... start.\n"); 2249 spin_lock_irqsave(&il->sta_lock, flags_spin); 2250 for (i = 0; i < il->hw_params.max_stations; i++) { 2251 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && 2252 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { 2253 D_ASSOC("Restoring sta %pM\n", 2254 il->stations[i].sta.sta.addr); 2255 il->stations[i].sta.mode = 0; 2256 il->stations[i].used |= IL_STA_UCODE_INPROGRESS; 2257 found = true; 2258 } 2259 } 2260 2261 for (i = 0; i < il->hw_params.max_stations; i++) { 2262 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { 2263 memcpy(&sta_cmd, &il->stations[i].sta, 2264 sizeof(struct il_addsta_cmd)); 2265 send_lq = false; 2266 if (il->stations[i].lq) { 2267 memcpy(&lq, il->stations[i].lq, 2268 sizeof(struct il_link_quality_cmd)); 2269 send_lq = true; 2270 } 2271 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2272 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2273 if (ret) { 2274 spin_lock_irqsave(&il->sta_lock, flags_spin); 2275 IL_ERR("Adding station %pM failed.\n", 2276 il->stations[i].sta.sta.addr); 2277 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; 2278 il->stations[i].used &= 2279 ~IL_STA_UCODE_INPROGRESS; 2280 spin_unlock_irqrestore(&il->sta_lock, 2281 flags_spin); 2282 } 2283 /* 2284 * Rate scaling has already been initialized, send 2285 * current LQ command 2286 */ 2287 if (send_lq) 2288 il_send_lq_cmd(il, &lq, CMD_SYNC, true); 2289 spin_lock_irqsave(&il->sta_lock, flags_spin); 2290 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; 2291 } 2292 } 2293 2294 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2295 if (!found) 2296 D_INFO("Restoring all known stations" 2297 " .... no stations to be restored.\n"); 2298 else 2299 D_INFO("Restoring all known stations" " .... 
complete.\n"); 2300 } 2301 EXPORT_SYMBOL(il_restore_stations); 2302 2303 int 2304 il_get_free_ucode_key_idx(struct il_priv *il) 2305 { 2306 int i; 2307 2308 for (i = 0; i < il->sta_key_max_num; i++) 2309 if (!test_and_set_bit(i, &il->ucode_key_table)) 2310 return i; 2311 2312 return WEP_INVALID_OFFSET; 2313 } 2314 EXPORT_SYMBOL(il_get_free_ucode_key_idx); 2315 2316 void 2317 il_dealloc_bcast_stations(struct il_priv *il) 2318 { 2319 unsigned long flags; 2320 int i; 2321 2322 spin_lock_irqsave(&il->sta_lock, flags); 2323 for (i = 0; i < il->hw_params.max_stations; i++) { 2324 if (!(il->stations[i].used & IL_STA_BCAST)) 2325 continue; 2326 2327 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2328 il->num_stations--; 2329 BUG_ON(il->num_stations < 0); 2330 kfree(il->stations[i].lq); 2331 il->stations[i].lq = NULL; 2332 } 2333 spin_unlock_irqrestore(&il->sta_lock, flags); 2334 } 2335 EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); 2336 2337 #ifdef CONFIG_IWLEGACY_DEBUG 2338 static void 2339 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2340 { 2341 int i; 2342 D_RATE("lq station id 0x%x\n", lq->sta_id); 2343 D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, 2344 lq->general_params.dual_stream_ant_msk); 2345 2346 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 2347 D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); 2348 } 2349 #else 2350 static inline void 2351 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2352 { 2353 } 2354 #endif 2355 2356 /* 2357 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity 2358 * 2359 * It sometimes happens when a HT rate has been in use and we 2360 * loose connectivity with AP then mac80211 will first tell us that the 2361 * current channel is not HT anymore before removing the station. In such a 2362 * scenario the RXON flags will be updated to indicate we are not 2363 * communicating HT anymore, but the LQ command may still contain HT rates. 2364 * Test for this to prevent driver from sending LQ command between the time 2365 * RXON flags are updated and when LQ command is updated. 2366 */ 2367 static bool 2368 il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq) 2369 { 2370 int i; 2371 2372 if (il->ht.enabled) 2373 return true; 2374 2375 D_INFO("Channel %u is not an HT channel\n", il->active.channel); 2376 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 2377 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { 2378 D_INFO("idx %d of LQ expects HT channel\n", i); 2379 return false; 2380 } 2381 } 2382 return true; 2383 } 2384 2385 /* 2386 * il_send_lq_cmd() - Send link quality command 2387 * @init: This command is sent as part of station initialization right 2388 * after station has been added. 2389 * 2390 * The link quality command is sent as the last step of station creation. 2391 * This is the special case in which init is set and we call a callback in 2392 * this case to clear the state indicating that station creation is in 2393 * progress. 
2394 */ 2395 int 2396 il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq, 2397 u8 flags, bool init) 2398 { 2399 int ret = 0; 2400 unsigned long flags_spin; 2401 2402 struct il_host_cmd cmd = { 2403 .id = C_TX_LINK_QUALITY_CMD, 2404 .len = sizeof(struct il_link_quality_cmd), 2405 .flags = flags, 2406 .data = lq, 2407 }; 2408 2409 if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) 2410 return -EINVAL; 2411 2412 spin_lock_irqsave(&il->sta_lock, flags_spin); 2413 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2414 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2415 return -EINVAL; 2416 } 2417 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2418 2419 il_dump_lq_cmd(il, lq); 2420 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 2421 2422 if (il_is_lq_table_valid(il, lq)) 2423 ret = il_send_cmd(il, &cmd); 2424 else 2425 ret = -EINVAL; 2426 2427 if (cmd.flags & CMD_ASYNC) 2428 return ret; 2429 2430 if (init) { 2431 D_INFO("init LQ command complete," 2432 " clearing sta addition status for sta %d\n", 2433 lq->sta_id); 2434 spin_lock_irqsave(&il->sta_lock, flags_spin); 2435 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; 2436 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2437 } 2438 return ret; 2439 } 2440 EXPORT_SYMBOL(il_send_lq_cmd); 2441 2442 int 2443 il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2444 struct ieee80211_sta *sta) 2445 { 2446 struct il_priv *il = hw->priv; 2447 struct il_station_priv_common *sta_common = (void *)sta->drv_priv; 2448 int ret; 2449 2450 mutex_lock(&il->mutex); 2451 D_MAC80211("enter station %pM\n", sta->addr); 2452 2453 ret = il_remove_station(il, sta_common->sta_id, sta->addr); 2454 if (ret) 2455 IL_ERR("Error removing station %pM\n", sta->addr); 2456 2457 D_MAC80211("leave ret %d\n", ret); 2458 mutex_unlock(&il->mutex); 2459 2460 return ret; 2461 } 2462 EXPORT_SYMBOL(il_mac_sta_remove); 2463 2464 /************************** RX-FUNCTIONS ****************************/ 2465 /* 2466 * Rx theory of operation 2467 * 2468 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 2469 * each of which point to Receive Buffers to be filled by the NIC. These get 2470 * used not only for Rx frames, but for any command response or notification 2471 * from the NIC. The driver and NIC manage the Rx buffers by means 2472 * of idxes into the circular buffer. 2473 * 2474 * Rx Queue Indexes 2475 * The host/firmware share two idx registers for managing the Rx buffers. 2476 * 2477 * The READ idx maps to the first position that the firmware may be writing 2478 * to -- the driver can read up to (but not including) this position and get 2479 * good data. 2480 * The READ idx is managed by the firmware once the card is enabled. 2481 * 2482 * The WRITE idx maps to the last position the driver has read from -- the 2483 * position preceding WRITE is the last slot the firmware can place a packet. 2484 * 2485 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 2486 * WRITE = READ. 2487 * 2488 * During initialization, the host sets up the READ queue position to the first 2489 * IDX position, and WRITE to the last (READ - 1 wrapped) 2490 * 2491 * When the firmware places a packet in a buffer, it will advance the READ idx 2492 * and fire the RX interrupt. The driver can then query the READ idx and 2493 * process as many packets as possible, moving the WRITE idx forward as it 2494 * resets the Rx queue buffers with new memory. 
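 * A worked example (illustrative only, with a hypothetical ring of 256 RBDs): after initialization READ = 0 and WRITE = 255, i.e. WRITE = READ - 1 wrapped, so the driver sees no received data yet. If the firmware then fills two buffers it advances READ to 2; the driver may process slots 0 and 1, restock them with fresh buffers, and advance WRITE accordingly. Keeping that one-slot gap is what distinguishes the empty state (WRITE = READ - 1) from the full state (WRITE = READ).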
2495 * 2496 * The management in the driver is as follows: 2497 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 2498 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 2499 * to replenish the iwl->rxq->rx_free. 2500 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the 2501 * iwl->rxq is replenished and the READ IDX is updated (updating the 2502 * 'processed' and 'read' driver idxes as well) 2503 * + A received packet is processed and handed to the kernel network stack, 2504 * detached from the iwl->rxq. The driver 'processed' idx is updated. 2505 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free 2506 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ 2507 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there 2508 * were enough free buffers and RX_STALLED is set it is cleared. 2509 * 2510 * 2511 * Driver sequence: 2512 * 2513 * il_rx_queue_alloc() Allocates rx_free 2514 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls 2515 * il_rx_queue_restock 2516 * il_rx_queue_restock() Moves available buffers from rx_free into Rx 2517 * queue, updates firmware pointers, and updates 2518 * the WRITE idx. If insufficient rx_free buffers 2519 * are available, schedules il_rx_replenish 2520 * 2521 * -- enable interrupts -- 2522 * ISR - il_rx() Detach il_rx_bufs from pool up to the 2523 * READ IDX, detaching the SKB from the pool. 2524 * Moves the packet buffer from queue to rx_used. 2525 * Calls il_rx_queue_restock to refill any empty 2526 * slots. 2527 * ... 2528 * 2529 */ 2530 2531 /* 2532 * il_rx_queue_space - Return number of free slots available in queue. 2533 */ 2534 int 2535 il_rx_queue_space(const struct il_rx_queue *q) 2536 { 2537 int s = q->read - q->write; 2538 if (s <= 0) 2539 s += RX_QUEUE_SIZE; 2540 /* keep some buffer to not confuse full and empty queue */ 2541 s -= 2; 2542 if (s < 0) 2543 s = 0; 2544 return s; 2545 } 2546 EXPORT_SYMBOL(il_rx_queue_space); 2547 2548 /* 2549 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue 2550 */ 2551 void 2552 il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) 2553 { 2554 unsigned long flags; 2555 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; 2556 u32 reg; 2557 2558 spin_lock_irqsave(&q->lock, flags); 2559 2560 if (q->need_update == 0) 2561 goto exit_unlock; 2562 2563 /* If power-saving is in use, make sure device is awake */ 2564 if (test_bit(S_POWER_PMI, &il->status)) { 2565 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2566 2567 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2568 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n", 2569 reg); 2570 il_set_bit(il, CSR_GP_CNTRL, 2571 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2572 goto exit_unlock; 2573 } 2574 2575 q->write_actual = (q->write & ~0x7); 2576 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2577 2578 /* Else device is assumed to be awake */ 2579 } else { 2580 /* Device expects a multiple of 8 */ 2581 q->write_actual = (q->write & ~0x7); 2582 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2583 } 2584 2585 q->need_update = 0; 2586 2587 exit_unlock: 2588 spin_unlock_irqrestore(&q->lock, flags); 2589 } 2590 EXPORT_SYMBOL(il_rx_queue_update_write_ptr); 2591 2592 int 2593 il_rx_queue_alloc(struct il_priv *il) 2594 { 2595 struct il_rx_queue *rxq = &il->rxq; 2596 struct device *dev = &il->pci_dev->dev; 2597 int i; 2598 2599 spin_lock_init(&rxq->lock); 2600 INIT_LIST_HEAD(&rxq->rx_free); 2601 INIT_LIST_HEAD(&rxq->rx_used); 
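	/* Each entry of the 'bd' ring allocated below is a single 32-bit receive buffer descriptor (essentially the DMA address of one Rx buffer), hence the 4 * RX_QUEUE_SIZE allocation size. */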
2602 2603 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 2604 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, 2605 GFP_KERNEL); 2606 if (!rxq->bd) 2607 goto err_bd; 2608 2609 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status), 2610 &rxq->rb_stts_dma, GFP_KERNEL); 2611 if (!rxq->rb_stts) 2612 goto err_rb; 2613 2614 /* Fill the rx_used queue with _all_ of the Rx buffers */ 2615 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 2616 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 2617 2618 /* Set us so that we have processed and used all buffers, but have 2619 * not restocked the Rx queue with fresh buffers */ 2620 rxq->read = rxq->write = 0; 2621 rxq->write_actual = 0; 2622 rxq->free_count = 0; 2623 rxq->need_update = 0; 2624 return 0; 2625 2626 err_rb: 2627 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 2628 rxq->bd_dma); 2629 err_bd: 2630 return -ENOMEM; 2631 } 2632 EXPORT_SYMBOL(il_rx_queue_alloc); 2633 2634 void 2635 il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb) 2636 { 2637 struct il_rx_pkt *pkt = rxb_addr(rxb); 2638 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); 2639 2640 if (!report->state) { 2641 D_11H("Spectrum Measure Notification: Start\n"); 2642 return; 2643 } 2644 2645 memcpy(&il->measure_report, report, sizeof(*report)); 2646 il->measurement_status |= MEASUREMENT_READY; 2647 } 2648 EXPORT_SYMBOL(il_hdl_spectrum_measurement); 2649 2650 /* 2651 * returns non-zero if packet should be dropped 2652 */ 2653 int 2654 il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, 2655 u32 decrypt_res, struct ieee80211_rx_status *stats) 2656 { 2657 u16 fc = le16_to_cpu(hdr->frame_control); 2658 2659 /* 2660 * All contexts have the same setting here due to it being 2661 * a module parameter, so OK to check any context. 2662 */ 2663 if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) 2664 return 0; 2665 2666 if (!(fc & IEEE80211_FCTL_PROTECTED)) 2667 return 0; 2668 2669 D_RX("decrypt_res:0x%x\n", decrypt_res); 2670 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { 2671 case RX_RES_STATUS_SEC_TYPE_TKIP: 2672 /* The uCode got a bad phase 1 key, so it pushes the packet up; 2673 * decryption will be done in SW. */ 2674 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2675 RX_RES_STATUS_BAD_KEY_TTAK) 2676 break; 2677 fallthrough; 2678 2679 case RX_RES_STATUS_SEC_TYPE_WEP: 2680 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2681 RX_RES_STATUS_BAD_ICV_MIC) { 2682 /* bad ICV, the packet is destroyed since the 2683 * decryption is in place, drop it */ 2684 D_RX("Packet destroyed\n"); 2685 return -1; 2686 } 2687 fallthrough; 2688 case RX_RES_STATUS_SEC_TYPE_CCMP: 2689 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2690 RX_RES_STATUS_DECRYPT_OK) { 2691 D_RX("hw decrypt succeeded\n"); 2692 stats->flag |= RX_FLAG_DECRYPTED; 2693 } 2694 break; 2695 2696 default: 2697 break; 2698 } 2699 return 0; 2700 } 2701 EXPORT_SYMBOL(il_set_decrypted_flag); 2702 2703 /* 2704 * il_txq_update_write_ptr - Send new write idx to hardware 2705 */ 2706 void 2707 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) 2708 { 2709 u32 reg = 0; 2710 int txq_id = txq->q.id; 2711 2712 if (txq->need_update == 0) 2713 return; 2714 2715 /* if we're trying to save power */ 2716 if (test_bit(S_POWER_PMI, &il->status)) { 2717 /* wake up nic if it's powered down ... 2718 * uCode will wake up, and interrupt us again, so next 2719 * time we'll skip this part.
*/ 2720 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2721 2722 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2723 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n", 2724 txq_id, reg); 2725 il_set_bit(il, CSR_GP_CNTRL, 2726 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2727 return; 2728 } 2729 2730 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2731 2732 /* 2733 * else not in power-save mode, 2734 * uCode will never sleep when we're 2735 * trying to tx (during RFKILL, we're not trying to tx). 2736 */ 2737 } else 2738 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2739 txq->need_update = 0; 2740 } 2741 EXPORT_SYMBOL(il_txq_update_write_ptr); 2742 2743 /* 2744 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's 2745 */ 2746 void 2747 il_tx_queue_unmap(struct il_priv *il, int txq_id) 2748 { 2749 struct il_tx_queue *txq = &il->txq[txq_id]; 2750 struct il_queue *q = &txq->q; 2751 2752 if (q->n_bd == 0) 2753 return; 2754 2755 while (q->write_ptr != q->read_ptr) { 2756 il->ops->txq_free_tfd(il, txq); 2757 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2758 } 2759 } 2760 EXPORT_SYMBOL(il_tx_queue_unmap); 2761 2762 /* 2763 * il_tx_queue_free - Deallocate DMA queue. 2764 * @txq: Transmit queue to deallocate. 2765 * 2766 * Empty queue by removing and destroying all BD's. 2767 * Free all buffers. 2768 * 0-fill, but do not free "txq" descriptor structure. 2769 */ 2770 void 2771 il_tx_queue_free(struct il_priv *il, int txq_id) 2772 { 2773 struct il_tx_queue *txq = &il->txq[txq_id]; 2774 struct device *dev = &il->pci_dev->dev; 2775 int i; 2776 2777 il_tx_queue_unmap(il, txq_id); 2778 2779 /* De-alloc array of command/tx buffers */ 2780 if (txq->cmd) { 2781 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 2782 kfree(txq->cmd[i]); 2783 } 2784 2785 /* De-alloc circular buffer of TFDs */ 2786 if (txq->q.n_bd) 2787 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2788 txq->tfds, txq->q.dma_addr); 2789 2790 /* De-alloc array of per-TFD driver data */ 2791 kfree(txq->skbs); 2792 txq->skbs = NULL; 2793 2794 /* deallocate arrays */ 2795 kfree(txq->cmd); 2796 kfree(txq->meta); 2797 txq->cmd = NULL; 2798 txq->meta = NULL; 2799 2800 /* 0-fill queue descriptor structure */ 2801 memset(txq, 0, sizeof(*txq)); 2802 } 2803 EXPORT_SYMBOL(il_tx_queue_free); 2804 2805 /* 2806 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue 2807 */ 2808 void 2809 il_cmd_queue_unmap(struct il_priv *il) 2810 { 2811 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2812 struct il_queue *q = &txq->q; 2813 int i; 2814 2815 if (q->n_bd == 0) 2816 return; 2817 2818 while (q->read_ptr != q->write_ptr) { 2819 i = il_get_cmd_idx(q, q->read_ptr, 0); 2820 2821 if (txq->meta[i].flags & CMD_MAPPED) { 2822 dma_unmap_single(&il->pci_dev->dev, 2823 dma_unmap_addr(&txq->meta[i], mapping), 2824 dma_unmap_len(&txq->meta[i], len), 2825 DMA_BIDIRECTIONAL); 2826 txq->meta[i].flags = 0; 2827 } 2828 2829 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2830 } 2831 2832 i = q->n_win; 2833 if (txq->meta[i].flags & CMD_MAPPED) { 2834 dma_unmap_single(&il->pci_dev->dev, 2835 dma_unmap_addr(&txq->meta[i], mapping), 2836 dma_unmap_len(&txq->meta[i], len), 2837 DMA_BIDIRECTIONAL); 2838 txq->meta[i].flags = 0; 2839 } 2840 } 2841 EXPORT_SYMBOL(il_cmd_queue_unmap); 2842 2843 /* 2844 * il_cmd_queue_free - Deallocate DMA queue. 2845 * 2846 * Empty queue by removing and destroying all BD's. 2847 * Free all buffers. 2848 * 0-fill, but do not free "txq" descriptor structure. 
*/ 2850 void 2851 il_cmd_queue_free(struct il_priv *il) 2852 { 2853 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2854 struct device *dev = &il->pci_dev->dev; 2855 int i; 2856 2857 il_cmd_queue_unmap(il); 2858 2859 /* De-alloc array of command/tx buffers */ 2860 if (txq->cmd) { 2861 for (i = 0; i <= TFD_CMD_SLOTS; i++) 2862 kfree(txq->cmd[i]); 2863 } 2864 2865 /* De-alloc circular buffer of TFDs */ 2866 if (txq->q.n_bd) 2867 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2868 txq->tfds, txq->q.dma_addr); 2869 2870 /* deallocate arrays */ 2871 kfree(txq->cmd); 2872 kfree(txq->meta); 2873 txq->cmd = NULL; 2874 txq->meta = NULL; 2875 2876 /* 0-fill queue descriptor structure */ 2877 memset(txq, 0, sizeof(*txq)); 2878 } 2879 EXPORT_SYMBOL(il_cmd_queue_free); 2880 2881 /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 2882 * DMA services 2883 * 2884 * Theory of operation 2885 * 2886 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 2887 * of buffer descriptors, each of which points to one or more data buffers for 2888 * the device to read from or fill. Driver and device exchange status of each 2889 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 2890 * entries in each circular buffer, to protect against confusing empty and full 2891 * queue states. 2892 * 2893 * The device reads or writes the data in the queues via the device's several 2894 * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 2895 * 2896 * For Tx queues, there are low mark and high mark limits. If, after queuing 2897 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When 2898 * reclaiming packets (on a 'Tx done' IRQ), if free space becomes > high mark, 2899 * the Tx queue is resumed. 2900 * 2901 * See more detailed info in 4965.h. 2902 ***************************************************/ 2903 2904 int 2905 il_queue_space(const struct il_queue *q) 2906 { 2907 int s = q->read_ptr - q->write_ptr; 2908 2909 if (q->read_ptr > q->write_ptr) 2910 s -= q->n_bd; 2911 2912 if (s <= 0) 2913 s += q->n_win; 2914 /* keep some reserve to not confuse empty and full situations */ 2915 s -= 2; 2916 if (s < 0) 2917 s = 0; 2918 return s; 2919 } 2920 EXPORT_SYMBOL(il_queue_space); 2921 2922 2923 /* 2924 * il_queue_init - Initialize queue's high/low-water and read/write idxes 2925 */ 2926 static int 2927 il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id) 2928 { 2929 /* 2930 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 2931 * il_queue_inc_wrap and il_queue_dec_wrap are broken. 2932 */ 2933 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 2934 /* FIXME: remove q->n_bd */ 2935 q->n_bd = TFD_QUEUE_SIZE_MAX; 2936 2937 q->n_win = slots; 2938 q->id = id; 2939 2940 /* slots must be power-of-two size, otherwise 2941 * il_get_cmd_idx is broken.
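 * (A power-of-two window lets the wrap be computed with a mask rather than a division: for example, with slots = 32, idx & (slots - 1) gives 35 & 31 = 3, which equals 35 % 32; that identity only holds for power-of-two sizes.)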
*/ 2942 BUG_ON(!is_power_of_2(slots)); 2943 2944 q->low_mark = q->n_win / 4; 2945 if (q->low_mark < 4) 2946 q->low_mark = 4; 2947 2948 q->high_mark = q->n_win / 8; 2949 if (q->high_mark < 2) 2950 q->high_mark = 2; 2951 2952 q->write_ptr = q->read_ptr = 0; 2953 2954 return 0; 2955 } 2956 2957 /* 2958 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue 2959 */ 2960 static int 2961 il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) 2962 { 2963 struct device *dev = &il->pci_dev->dev; 2964 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 2965 2966 /* Driver private data, only for Tx (not command) queues, 2967 * not shared with device. */ 2968 if (id != il->cmd_queue) { 2969 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, 2970 sizeof(struct sk_buff *), 2971 GFP_KERNEL); 2972 if (!txq->skbs) { 2973 IL_ERR("Failed to alloc skbs\n"); 2974 goto error; 2975 } 2976 } else 2977 txq->skbs = NULL; 2978 2979 /* Circular buffer of transmit frame descriptors (TFDs), 2980 * shared with device */ 2981 txq->tfds = 2982 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); 2983 if (!txq->tfds) 2984 goto error; 2985 2986 txq->q.id = id; 2987 2988 return 0; 2989 2990 error: 2991 kfree(txq->skbs); 2992 txq->skbs = NULL; 2993 2994 return -ENOMEM; 2995 } 2996 2997 /* 2998 * il_tx_queue_init - Allocate and initialize one tx/cmd queue 2999 */ 3000 int 3001 il_tx_queue_init(struct il_priv *il, u32 txq_id) 3002 { 3003 int i, len, ret; 3004 int slots, actual_slots; 3005 struct il_tx_queue *txq = &il->txq[txq_id]; 3006 3007 /* 3008 * Alloc buffer array for commands (Tx or other types of commands). 3009 * For the command queue (#4/#9), allocate command space + one big 3010 * command for scan, since the scan command is very large; the system will 3011 * not have two scans at the same time, so only one is needed. 3012 * For normal Tx queues (all other queues), no super-size command 3013 * space is needed. 3014 */ 3015 if (txq_id == il->cmd_queue) { 3016 slots = TFD_CMD_SLOTS; 3017 actual_slots = slots + 1; 3018 } else { 3019 slots = TFD_TX_CMD_SLOTS; 3020 actual_slots = slots; 3021 } 3022 3023 txq->meta = 3024 kcalloc(actual_slots, sizeof(struct il_cmd_meta), GFP_KERNEL); 3025 txq->cmd = 3026 kcalloc(actual_slots, sizeof(struct il_device_cmd *), GFP_KERNEL); 3027 3028 if (!txq->meta || !txq->cmd) 3029 goto out_free_arrays; 3030 3031 len = sizeof(struct il_device_cmd); 3032 for (i = 0; i < actual_slots; i++) { 3033 /* only happens for cmd queue */ 3034 if (i == slots) 3035 len = IL_MAX_CMD_SIZE; 3036 3037 txq->cmd[i] = kmalloc(len, GFP_KERNEL); 3038 if (!txq->cmd[i]) 3039 goto err; 3040 } 3041 3042 /* Alloc driver data array and TFD circular buffer */ 3043 ret = il_tx_queue_alloc(il, txq, txq_id); 3044 if (ret) 3045 goto err; 3046 3047 txq->need_update = 0; 3048 3049 /* 3050 * For the default queues 0-3, set up the swq_id 3051 * already -- all others need to get one later 3052 * (if they need one at all).
3053 */ 3054 if (txq_id < 4) 3055 il_set_swq_id(txq, txq_id, txq_id); 3056 3057 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3058 il_queue_init(il, &txq->q, slots, txq_id); 3059 3060 /* Tell device where to find queue */ 3061 il->ops->txq_init(il, txq); 3062 3063 return 0; 3064 err: 3065 for (i = 0; i < actual_slots; i++) 3066 kfree(txq->cmd[i]); 3067 out_free_arrays: 3068 kfree(txq->meta); 3069 txq->meta = NULL; 3070 kfree(txq->cmd); 3071 txq->cmd = NULL; 3072 3073 return -ENOMEM; 3074 } 3075 EXPORT_SYMBOL(il_tx_queue_init); 3076 3077 void 3078 il_tx_queue_reset(struct il_priv *il, u32 txq_id) 3079 { 3080 int slots, actual_slots; 3081 struct il_tx_queue *txq = &il->txq[txq_id]; 3082 3083 if (txq_id == il->cmd_queue) { 3084 slots = TFD_CMD_SLOTS; 3085 actual_slots = TFD_CMD_SLOTS + 1; 3086 } else { 3087 slots = TFD_TX_CMD_SLOTS; 3088 actual_slots = TFD_TX_CMD_SLOTS; 3089 } 3090 3091 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); 3092 txq->need_update = 0; 3093 3094 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3095 il_queue_init(il, &txq->q, slots, txq_id); 3096 3097 /* Tell device where to find queue */ 3098 il->ops->txq_init(il, txq); 3099 } 3100 EXPORT_SYMBOL(il_tx_queue_reset); 3101 3102 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 3103 3104 /* 3105 * il_enqueue_hcmd - enqueue a uCode command 3106 * @il: device private data pointer 3107 * @cmd: a pointer to the ucode command structure 3108 * 3109 * The function returns a value < 0 to indicate that the operation 3110 * failed. On success, it returns the idx (>= 0) of the command in the 3111 * command queue. 3112 */ 3113 int 3114 il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) 3115 { 3116 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3117 struct il_queue *q = &txq->q; 3118 struct il_device_cmd *out_cmd; 3119 struct il_cmd_meta *out_meta; 3120 dma_addr_t phys_addr; 3121 unsigned long flags; 3122 u32 idx; 3123 u16 fix_size; 3124 3125 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len); 3126 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); 3127 3128 /* If any of the command structures ends up being larger than 3129 * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then 3130 * we will need to increase the size of the TFD entries. 3131 * Also check that the command buffer does not exceed the size 3132 * of device_cmd and max_cmd_size. */ 3133 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 3134 !(cmd->flags & CMD_SIZE_HUGE)); 3135 BUG_ON(fix_size > IL_MAX_CMD_SIZE); 3136 3137 if (il_is_rfkill(il) || il_is_ctkill(il)) { 3138 IL_WARN("Not sending command - %s KILL\n", 3139 il_is_rfkill(il) ? "RF" : "CT"); 3140 return -EIO; 3141 } 3142 3143 spin_lock_irqsave(&il->hcmd_lock, flags); 3144 3145 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ?
2 : 1)) { 3146 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3147 3148 IL_ERR("Restarting adapter due to command queue full\n"); 3149 queue_work(il->workqueue, &il->restart); 3150 return -ENOSPC; 3151 } 3152 3153 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 3154 out_cmd = txq->cmd[idx]; 3155 out_meta = &txq->meta[idx]; 3156 3157 if (WARN_ON(out_meta->flags & CMD_MAPPED)) { 3158 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3159 return -ENOSPC; 3160 } 3161 3162 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 3163 out_meta->flags = cmd->flags | CMD_MAPPED; 3164 if (cmd->flags & CMD_WANT_SKB) 3165 out_meta->source = cmd; 3166 if (cmd->flags & CMD_ASYNC) 3167 out_meta->callback = cmd->callback; 3168 3169 out_cmd->hdr.cmd = cmd->id; 3170 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); 3171 3172 /* At this point, the out_cmd now has all of the incoming cmd 3173 * information */ 3174 3175 out_cmd->hdr.flags = 0; 3176 out_cmd->hdr.sequence = 3177 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr)); 3178 if (cmd->flags & CMD_SIZE_HUGE) 3179 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 3180 3181 #ifdef CONFIG_IWLEGACY_DEBUG 3182 switch (out_cmd->hdr.cmd) { 3183 case C_TX_LINK_QUALITY_CMD: 3184 case C_SENSITIVITY: 3185 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, " 3186 "%d bytes at %d[%d]:%d\n", 3187 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, 3188 le16_to_cpu(out_cmd->hdr.sequence), fix_size, 3189 q->write_ptr, idx, il->cmd_queue); 3190 break; 3191 default: 3192 D_HC("Sending command %s (#%x), seq: 0x%04X, " 3193 "%d bytes at %d[%d]:%d\n", 3194 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, 3195 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr, 3196 idx, il->cmd_queue); 3197 } 3198 #endif 3199 3200 phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size, 3201 DMA_BIDIRECTIONAL); 3202 if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) { 3203 idx = -ENOMEM; 3204 goto out; 3205 } 3206 dma_unmap_addr_set(out_meta, mapping, phys_addr); 3207 dma_unmap_len_set(out_meta, len, fix_size); 3208 3209 txq->need_update = 1; 3210 3211 if (il->ops->txq_update_byte_cnt_tbl) 3212 /* Set up entry in queue's byte count circular buffer */ 3213 il->ops->txq_update_byte_cnt_tbl(il, txq, 0); 3214 3215 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1, 3216 U32_PAD(cmd->len)); 3217 3218 /* Increment and update queue's write idx */ 3219 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); 3220 il_txq_update_write_ptr(il, txq); 3221 3222 out: 3223 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3224 return idx; 3225 } 3226 3227 /* 3228 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 3229 * 3230 * When FW advances 'R' idx, all entries between old and new 'R' idx 3231 * need to be reclaimed. As result, some free space forms. If there is 3232 * enough free space (> low mark), wake the stack that feeds us. 
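 * For the command queue, exactly one entry is expected to be reclaimed per completed command; reclaiming more than one means the read/write pointers went out of sync, which is reported below and answered with an adapter restart.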
3233 */ 3234 static void 3235 il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx) 3236 { 3237 struct il_tx_queue *txq = &il->txq[txq_id]; 3238 struct il_queue *q = &txq->q; 3239 int nfreed = 0; 3240 3241 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { 3242 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " 3243 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, 3244 q->write_ptr, q->read_ptr); 3245 return; 3246 } 3247 3248 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 3249 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { 3250 3251 if (nfreed++ > 0) { 3252 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx, 3253 q->write_ptr, q->read_ptr); 3254 queue_work(il->workqueue, &il->restart); 3255 } 3256 3257 } 3258 } 3259 3260 /* 3261 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them 3262 * @rxb: Rx buffer to reclaim 3263 * 3264 * If an Rx buffer has an async callback associated with it, the callback 3265 * will be executed. The attached skb (if present) will only be freed 3266 * if the callback returns 1. 3267 */ 3268 void 3269 il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) 3270 { 3271 struct il_rx_pkt *pkt = rxb_addr(rxb); 3272 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 3273 int txq_id = SEQ_TO_QUEUE(sequence); 3274 int idx = SEQ_TO_IDX(sequence); 3275 int cmd_idx; 3276 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 3277 struct il_device_cmd *cmd; 3278 struct il_cmd_meta *meta; 3279 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3280 unsigned long flags; 3281 3282 /* If a Tx command is being handled and it isn't in the actual 3283 * command queue then a command routing bug has been introduced 3284 * in the queue management code. */ 3285 if (WARN 3286 (txq_id != il->cmd_queue, 3287 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 3288 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, 3289 il->txq[il->cmd_queue].q.write_ptr)) { 3290 il_print_hex_error(il, pkt, 32); 3291 return; 3292 } 3293 3294 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge); 3295 cmd = txq->cmd[cmd_idx]; 3296 meta = &txq->meta[cmd_idx]; 3297 3298 txq->time_stamp = jiffies; 3299 3300 dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping), 3301 dma_unmap_len(meta, len), DMA_BIDIRECTIONAL); 3302 3303 /* Input error checking is done when commands are added to queue. */ 3304 if (meta->flags & CMD_WANT_SKB) { 3305 meta->source->reply_page = (unsigned long)rxb_addr(rxb); 3306 rxb->page = NULL; 3307 } else if (meta->callback) 3308 meta->callback(il, cmd, pkt); 3309 3310 spin_lock_irqsave(&il->hcmd_lock, flags); 3311 3312 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx); 3313 3314 if (!(meta->flags & CMD_ASYNC)) { 3315 clear_bit(S_HCMD_ACTIVE, &il->status); 3316 D_INFO("Clearing HCMD_ACTIVE for command %s\n", 3317 il_get_cmd_string(cmd->hdr.cmd)); 3318 wake_up(&il->wait_command_queue); 3319 } 3320 3321 /* Mark as unmapped */ 3322 meta->flags = 0; 3323 3324 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3325 } 3326 EXPORT_SYMBOL(il_tx_cmd_complete); 3327 3328 MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); 3329 MODULE_VERSION(IWLWIFI_VERSION); 3330 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 3331 MODULE_LICENSE("GPL"); 3332 3333 /* 3334 * If bt_coex_active is set to true, uCode will do kill/defer 3335 * every time the priority line is asserted (BT is sending signals on the 3336 * priority line in the PCIx).
3337 * If bt_coex_active is set to false, uCode will ignore the BT activity and 3338 * perform the normal operation. 3339 * 3340 * Users might experience transmit issues on some platforms due to this 3341 * WiFi/BT co-existence problem. The possible behaviors are: being able to 3342 * scan and find all the available APs, 3343 * but not being able to associate with any AP. 3344 * On those platforms, WiFi communication can be restored by setting the 3345 * "bt_coex_active" module parameter to "false" 3346 * 3347 * default: bt_coex_active = true (BT_COEX_ENABLE) 3348 */ 3349 static bool bt_coex_active = true; 3350 module_param(bt_coex_active, bool, 0444); 3351 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 3352 3353 u32 il_debug_level; 3354 EXPORT_SYMBOL(il_debug_level); 3355 3356 const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3357 EXPORT_SYMBOL(il_bcast_addr); 3358 3359 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 3360 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 3361 static void 3362 il_init_ht_hw_capab(const struct il_priv *il, 3363 struct ieee80211_sta_ht_cap *ht_info, 3364 enum nl80211_band band) 3365 { 3366 u16 max_bit_rate = 0; 3367 u8 rx_chains_num = il->hw_params.rx_chains_num; 3368 u8 tx_chains_num = il->hw_params.tx_chains_num; 3369 3370 ht_info->cap = 0; 3371 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 3372 3373 ht_info->ht_supported = true; 3374 3375 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 3376 max_bit_rate = MAX_BIT_RATE_20_MHZ; 3377 if (il->hw_params.ht40_channel & BIT(band)) { 3378 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 3379 ht_info->cap |= IEEE80211_HT_CAP_SGI_40; 3380 ht_info->mcs.rx_mask[4] = 0x01; 3381 max_bit_rate = MAX_BIT_RATE_40_MHZ; 3382 } 3383 3384 if (il->cfg->mod_params->amsdu_size_8K) 3385 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 3386 3387 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 3388 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 3389 3390 ht_info->mcs.rx_mask[0] = 0xFF; 3391 if (rx_chains_num >= 2) 3392 ht_info->mcs.rx_mask[1] = 0xFF; 3393 if (rx_chains_num >= 3) 3394 ht_info->mcs.rx_mask[2] = 0xFF; 3395 3396 /* Highest supported Rx data rate */ 3397 max_bit_rate *= rx_chains_num; 3398 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); 3399 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); 3400 3401 /* Tx MCS capabilities */ 3402 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 3403 if (tx_chains_num != rx_chains_num) { 3404 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 3405 ht_info->mcs.tx_params |= 3406 ((tx_chains_num - 3407 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 3408 } 3409 } 3410 3411 /* 3412 * il_init_geos - Initialize mac80211's geo/channel info based on EEPROM 3413 */ 3414 int 3415 il_init_geos(struct il_priv *il) 3416 { 3417 struct il_channel_info *ch; 3418 struct ieee80211_supported_band *sband; 3419 struct ieee80211_channel *channels; 3420 struct ieee80211_channel *geo_ch; 3421 struct ieee80211_rate *rates; 3422 int i = 0; 3423 s8 max_tx_power = 0; 3424 3425 if (il->bands[NL80211_BAND_2GHZ].n_bitrates || 3426 il->bands[NL80211_BAND_5GHZ].n_bitrates) { 3427 D_INFO("Geography modes already initialized.\n"); 3428 set_bit(S_GEO_CONFIGURED, &il->status); 3429 return 0; 3430 } 3431 3432 channels = 3433 kcalloc(il->channel_count, sizeof(struct ieee80211_channel), 3434 GFP_KERNEL); 3435 if (!channels) 3436 return -ENOMEM; 3437 3438 rates = 3439 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY), 3440 GFP_KERNEL); 3441 if (!rates) { 3442 kfree(channels); 3443 return -ENOMEM; 3444 }
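	/* The 'rates' array is laid out CCK first, then OFDM starting at IL_FIRST_OFDM_RATE, so each band below can simply point into the slice of legacy rates it supports. */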
3445 3446 /* 5.2GHz channels start after the 2.4GHz channels */ 3447 sband = &il->bands[NL80211_BAND_5GHZ]; 3448 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; 3449 /* just OFDM */ 3450 sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; 3451 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; 3452 3453 if (il->cfg->sku & IL_SKU_N) 3454 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ); 3455 3456 sband = &il->bands[NL80211_BAND_2GHZ]; 3457 sband->channels = channels; 3458 /* OFDM & CCK */ 3459 sband->bitrates = rates; 3460 sband->n_bitrates = RATE_COUNT_LEGACY; 3461 3462 if (il->cfg->sku & IL_SKU_N) 3463 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ); 3464 3465 il->ieee_channels = channels; 3466 il->ieee_rates = rates; 3467 3468 for (i = 0; i < il->channel_count; i++) { 3469 ch = &il->channel_info[i]; 3470 3471 if (!il_is_channel_valid(ch)) 3472 continue; 3473 3474 sband = &il->bands[ch->band]; 3475 3476 geo_ch = &sband->channels[sband->n_channels++]; 3477 3478 geo_ch->center_freq = 3479 ieee80211_channel_to_frequency(ch->channel, ch->band); 3480 geo_ch->max_power = ch->max_power_avg; 3481 geo_ch->max_antenna_gain = 0xff; 3482 geo_ch->hw_value = ch->channel; 3483 3484 if (il_is_channel_valid(ch)) { 3485 if (!(ch->flags & EEPROM_CHANNEL_IBSS)) 3486 geo_ch->flags |= IEEE80211_CHAN_NO_IR; 3487 3488 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) 3489 geo_ch->flags |= IEEE80211_CHAN_NO_IR; 3490 3491 if (ch->flags & EEPROM_CHANNEL_RADAR) 3492 geo_ch->flags |= IEEE80211_CHAN_RADAR; 3493 3494 geo_ch->flags |= ch->ht40_extension_channel; 3495 3496 if (ch->max_power_avg > max_tx_power) 3497 max_tx_power = ch->max_power_avg; 3498 } else { 3499 geo_ch->flags |= IEEE80211_CHAN_DISABLED; 3500 } 3501 3502 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel, 3503 geo_ch->center_freq, 3504 il_is_channel_a_band(ch) ? "5.2" : "2.4", 3505 geo_ch-> 3506 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid", 3507 geo_ch->flags); 3508 } 3509 3510 il->tx_power_device_lmt = max_tx_power; 3511 il->tx_power_user_lmt = max_tx_power; 3512 il->tx_power_next = max_tx_power; 3513 3514 if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 && 3515 (il->cfg->sku & IL_SKU_A)) { 3516 IL_INFO("Incorrectly detected BG card as ABG. 
" 3517 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", 3518 il->pci_dev->device, il->pci_dev->subsystem_device); 3519 il->cfg->sku &= ~IL_SKU_A; 3520 } 3521 3522 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n", 3523 il->bands[NL80211_BAND_2GHZ].n_channels, 3524 il->bands[NL80211_BAND_5GHZ].n_channels); 3525 3526 set_bit(S_GEO_CONFIGURED, &il->status); 3527 3528 return 0; 3529 } 3530 EXPORT_SYMBOL(il_init_geos); 3531 3532 /* 3533 * il_free_geos - undo allocations in il_init_geos 3534 */ 3535 void 3536 il_free_geos(struct il_priv *il) 3537 { 3538 kfree(il->ieee_channels); 3539 kfree(il->ieee_rates); 3540 clear_bit(S_GEO_CONFIGURED, &il->status); 3541 } 3542 EXPORT_SYMBOL(il_free_geos); 3543 3544 static bool 3545 il_is_channel_extension(struct il_priv *il, enum nl80211_band band, 3546 u16 channel, u8 extension_chan_offset) 3547 { 3548 const struct il_channel_info *ch_info; 3549 3550 ch_info = il_get_channel_info(il, band, channel); 3551 if (!il_is_channel_valid(ch_info)) 3552 return false; 3553 3554 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) 3555 return !(ch_info-> 3556 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS); 3557 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) 3558 return !(ch_info-> 3559 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS); 3560 3561 return false; 3562 } 3563 3564 bool 3565 il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap) 3566 { 3567 if (!il->ht.enabled || !il->ht.is_40mhz) 3568 return false; 3569 3570 /* 3571 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 3572 * the bit will not set if it is pure 40MHz case 3573 */ 3574 if (ht_cap && !ht_cap->ht_supported) 3575 return false; 3576 3577 #ifdef CONFIG_IWLEGACY_DEBUGFS 3578 if (il->disable_ht40) 3579 return false; 3580 #endif 3581 3582 return il_is_channel_extension(il, il->band, 3583 le16_to_cpu(il->staging.channel), 3584 il->ht.extension_chan_offset); 3585 } 3586 EXPORT_SYMBOL(il_is_ht40_tx_allowed); 3587 3588 static u16 noinline 3589 il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 3590 { 3591 u16 new_val; 3592 u16 beacon_factor; 3593 3594 /* 3595 * If mac80211 hasn't given us a beacon interval, program 3596 * the default into the device. 3597 */ 3598 if (!beacon_val) 3599 return DEFAULT_BEACON_INTERVAL; 3600 3601 /* 3602 * If the beacon interval we obtained from the peer 3603 * is too large, we'll have to wake up more often 3604 * (and in IBSS case, we'll beacon too much) 3605 * 3606 * For example, if max_beacon_val is 4096, and the 3607 * requested beacon interval is 7000, we'll have to 3608 * use 3500 to be able to wake up on the beacons. 3609 * 3610 * This could badly influence beacon detection stats. 3611 */ 3612 3613 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; 3614 new_val = beacon_val / beacon_factor; 3615 3616 if (!new_val) 3617 new_val = max_beacon_val; 3618 3619 return new_val; 3620 } 3621 3622 int 3623 il_send_rxon_timing(struct il_priv *il) 3624 { 3625 u64 tsf; 3626 s32 interval_tm, rem; 3627 struct ieee80211_conf *conf = NULL; 3628 u16 beacon_int; 3629 struct ieee80211_vif *vif = il->vif; 3630 3631 conf = &il->hw->conf; 3632 3633 lockdep_assert_held(&il->mutex); 3634 3635 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd)); 3636 3637 il->timing.timestamp = cpu_to_le64(il->timestamp); 3638 il->timing.listen_interval = cpu_to_le16(conf->listen_interval); 3639 3640 beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; 3641 3642 /* 3643 * TODO: For IBSS we need to get atim_win from mac80211, 3644 * for now just always use 0 3645 */ 3646 il->timing.atim_win = 0; 3647 3648 beacon_int = 3649 il_adjust_beacon_interval(beacon_int, 3650 il->hw_params.max_beacon_itrvl * 3651 TIME_UNIT); 3652 il->timing.beacon_interval = cpu_to_le16(beacon_int); 3653 3654 tsf = il->timestamp; /* tsf is modified by do_div: copy it */ 3655 interval_tm = beacon_int * TIME_UNIT; 3656 rem = do_div(tsf, interval_tm); 3657 il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); 3658 3659 il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1; 3660 3661 D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n", 3662 le16_to_cpu(il->timing.beacon_interval), 3663 le32_to_cpu(il->timing.beacon_init_val), 3664 le16_to_cpu(il->timing.atim_win)); 3665 3666 return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing), 3667 &il->timing); 3668 } 3669 EXPORT_SYMBOL(il_send_rxon_timing); 3670 3671 void 3672 il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt) 3673 { 3674 struct il_rxon_cmd *rxon = &il->staging; 3675 3676 if (hw_decrypt) 3677 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 3678 else 3679 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; 3680 3681 } 3682 EXPORT_SYMBOL(il_set_rxon_hwcrypto); 3683 3684 /* check that the RXON structure is valid */ 3685 int 3686 il_check_rxon_cmd(struct il_priv *il) 3687 { 3688 struct il_rxon_cmd *rxon = &il->staging; 3689 bool error = false; 3690 3691 if (rxon->flags & RXON_FLG_BAND_24G_MSK) { 3692 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { 3693 IL_WARN("check 2.4G: wrong narrow\n"); 3694 error = true; 3695 } 3696 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { 3697 IL_WARN("check 2.4G: wrong radar\n"); 3698 error = true; 3699 } 3700 } else { 3701 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { 3702 IL_WARN("check 5.2G: not short slot!\n"); 3703 error = true; 3704 } 3705 if (rxon->flags & RXON_FLG_CCK_MSK) { 3706 IL_WARN("check 5.2G: CCK!\n"); 3707 error = true; 3708 } 3709 } 3710 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { 3711 IL_WARN("mac/bssid mcast!\n"); 3712 error = true; 3713 } 3714 3715 /* make sure basic rates 6Mbps and 1Mbps are supported */ 3716 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 && 3717 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) { 3718 IL_WARN("neither 1 nor 6 are basic\n"); 3719 error = true; 3720 } 3721 3722 if (le16_to_cpu(rxon->assoc_id) > 2007) { 3723 IL_WARN("aid > 2007\n"); 3724 error = true; 3725 } 3726 3727 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == 3728 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { 3729 IL_WARN("CCK and short slot\n"); 3730 error = true; 3731 } 3732 3733 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == 3734 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { 3735 IL_WARN("CCK and auto detect\n"); 3736 error = true; 3737 } 3738 3739 if ((rxon-> 3740 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) == 3741 RXON_FLG_TGG_PROTECT_MSK) { 3742 IL_WARN("TGg but no auto-detect\n"); 3743 error = true; 3744 } 3745 3746 if (error) 3747 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel)); 3748 3749 if (error) { 3750 IL_ERR("Invalid RXON\n"); 3751 return -EINVAL; 3752 } 3753 return 0; 3754 } 3755 EXPORT_SYMBOL(il_check_rxon_cmd); 3756 3757 /* 3758 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 3759 * @il: staging_rxon is compared to active_rxon 3760 * 3761 * If the RXON structure is changing enough
to require a new tune, 3762 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 3763 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 3764 */ 3765 int 3766 il_full_rxon_required(struct il_priv *il) 3767 { 3768 const struct il_rxon_cmd *staging = &il->staging; 3769 const struct il_rxon_cmd *active = &il->active; 3770 3771 #define CHK(cond) \ 3772 if ((cond)) { \ 3773 D_INFO("need full RXON - " #cond "\n"); \ 3774 return 1; \ 3775 } 3776 3777 #define CHK_NEQ(c1, c2) \ 3778 if ((c1) != (c2)) { \ 3779 D_INFO("need full RXON - " \ 3780 #c1 " != " #c2 " - %d != %d\n", \ 3781 (c1), (c2)); \ 3782 return 1; \ 3783 } 3784 3785 /* These items are only settable from the full RXON command */ 3786 CHK(!il_is_associated(il)); 3787 CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr)); 3788 CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr)); 3789 CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr, 3790 active->wlap_bssid_addr)); 3791 CHK_NEQ(staging->dev_type, active->dev_type); 3792 CHK_NEQ(staging->channel, active->channel); 3793 CHK_NEQ(staging->air_propagation, active->air_propagation); 3794 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, 3795 active->ofdm_ht_single_stream_basic_rates); 3796 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, 3797 active->ofdm_ht_dual_stream_basic_rates); 3798 CHK_NEQ(staging->assoc_id, active->assoc_id); 3799 3800 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can 3801 * be updated with the RXON_ASSOC command -- however only some 3802 * flag transitions are allowed using RXON_ASSOC */ 3803 3804 /* Check if we are not switching bands */ 3805 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, 3806 active->flags & RXON_FLG_BAND_24G_MSK); 3807 3808 /* Check if we are switching association toggle */ 3809 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, 3810 active->filter_flags & RXON_FILTER_ASSOC_MSK); 3811 3812 #undef CHK 3813 #undef CHK_NEQ 3814 3815 return 0; 3816 } 3817 EXPORT_SYMBOL(il_full_rxon_required); 3818 3819 u8 3820 il_get_lowest_plcp(struct il_priv *il) 3821 { 3822 /* 3823 * Assign the lowest rate -- should really get this from 3824 * the beacon skb from mac80211. 
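 * (In practice this yields the lowest basic rate for the current band: 1 Mbps CCK on 2.4 GHz and 6 Mbps OFDM on 5 GHz, per the RXON band flag tested below.)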
3825 */ 3826 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) 3827 return RATE_1M_PLCP; 3828 else 3829 return RATE_6M_PLCP; 3830 } 3831 EXPORT_SYMBOL(il_get_lowest_plcp); 3832 3833 static void 3834 _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3835 { 3836 struct il_rxon_cmd *rxon = &il->staging; 3837 3838 if (!il->ht.enabled) { 3839 rxon->flags &= 3840 ~(RXON_FLG_CHANNEL_MODE_MSK | 3841 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK 3842 | RXON_FLG_HT_PROT_MSK); 3843 return; 3844 } 3845 3846 rxon->flags |= 3847 cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); 3848 3849 /* Set up channel bandwidth: 3850 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ 3851 /* clear the HT channel mode before set the mode */ 3852 rxon->flags &= 3853 ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3854 if (il_is_ht40_tx_allowed(il, NULL)) { 3855 /* pure ht40 */ 3856 if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { 3857 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; 3858 /* Note: control channel is opposite of extension channel */ 3859 switch (il->ht.extension_chan_offset) { 3860 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3861 rxon->flags &= 3862 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3863 break; 3864 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3865 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3866 break; 3867 } 3868 } else { 3869 /* Note: control channel is opposite of extension channel */ 3870 switch (il->ht.extension_chan_offset) { 3871 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3872 rxon->flags &= 3873 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3874 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3875 break; 3876 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3877 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3878 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3879 break; 3880 case IEEE80211_HT_PARAM_CHA_SEC_NONE: 3881 default: 3882 /* channel location only valid if in Mixed mode */ 3883 IL_ERR("invalid extension channel offset\n"); 3884 break; 3885 } 3886 } 3887 } else { 3888 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; 3889 } 3890 3891 if (il->ops->set_rxon_chain) 3892 il->ops->set_rxon_chain(il); 3893 3894 D_ASSOC("rxon flags 0x%X operation mode :0x%X " 3895 "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), 3896 il->ht.protection, il->ht.extension_chan_offset); 3897 } 3898 3899 void 3900 il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3901 { 3902 _il_set_rxon_ht(il, ht_conf); 3903 } 3904 EXPORT_SYMBOL(il_set_rxon_ht); 3905 3906 /* Return valid, unused, channel for a passive scan to reset the RF */ 3907 u8 3908 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band) 3909 { 3910 const struct il_channel_info *ch_info; 3911 int i; 3912 u8 channel = 0; 3913 u8 min, max; 3914 3915 if (band == NL80211_BAND_5GHZ) { 3916 min = 14; 3917 max = il->channel_count; 3918 } else { 3919 min = 0; 3920 max = 14; 3921 } 3922 3923 for (i = min; i < max; i++) { 3924 channel = il->channel_info[i].channel; 3925 if (channel == le16_to_cpu(il->staging.channel)) 3926 continue; 3927 3928 ch_info = il_get_channel_info(il, band, channel); 3929 if (il_is_channel_valid(ch_info)) 3930 break; 3931 } 3932 3933 return channel; 3934 } 3935 EXPORT_SYMBOL(il_get_single_channel_number); 3936 3937 /* 3938 * il_set_rxon_channel - Set the band and channel values in staging RXON 3939 * @ch: requested channel as a pointer to struct ieee80211_channel 3940 3941 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 3942 * in the staging 
/*
 * il_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
int
il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
{
	enum nl80211_band band = ch->band;
	u16 channel = ch->hw_value;

	if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
		return 0;

	il->staging.channel = cpu_to_le16(channel);
	if (band == NL80211_BAND_5GHZ)
		il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		il->staging.flags |= RXON_FLG_BAND_24G_MSK;

	il->band = band;

	D_INFO("Staging channel set to %d [%d]\n", channel, band);

	return 0;
}
EXPORT_SYMBOL(il_set_rxon_channel);

void
il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
		      struct ieee80211_vif *vif)
{
	if (band == NL80211_BAND_5GHZ) {
		il->staging.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
		      RXON_FLG_CCK_MSK);
		il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		/* Copied from il_post_associate() */
		if (vif && vif->bss_conf.use_short_slot)
			il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		il->staging.flags |= RXON_FLG_BAND_24G_MSK;
		il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
		il->staging.flags &= ~RXON_FLG_CCK_MSK;
	}
}
EXPORT_SYMBOL(il_set_flags_for_band);

/*
 * initialize rxon structure with default values from eeprom
 */
void
il_connection_init_rx_config(struct il_priv *il)
{
	const struct il_channel_info *ch_info;

	memset(&il->staging, 0, sizeof(il->staging));

	switch (il->iw_mode) {
	case NL80211_IFTYPE_UNSPECIFIED:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		break;
	case NL80211_IFTYPE_STATION:
		il->staging.dev_type = RXON_DEV_TYPE_ESS;
		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	case NL80211_IFTYPE_ADHOC:
		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		il->staging.filter_flags =
		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
		break;
	default:
		IL_ERR("Unsupported interface type %d\n", il->vif->type);
		return;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(il->hw)->short_preamble)
		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	ch_info =
	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));

	if (!ch_info)
		ch_info = &il->channel_info[0];

	il->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, il->band, il->vif);

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	il->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flags */
	il->staging.flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
	if (il->vif)
		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);

	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
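/*
 * Illustrative sketch (assumption, mirroring il_mac_config() later in
 * this file): a channel change updates the staging RXON first and is
 * committed afterwards, e.g.:
 *
 *	il_set_rxon_channel(il, channel);
 *	il_set_rxon_ht(il, ht_conf);
 *	il_set_flags_for_band(il, channel->band, il->vif);
 *	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
 *		il_commit_rxon(il);
 */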
void
il_set_rate(struct il_priv *il)
{
	const struct ieee80211_supported_band *hw = NULL;
	struct ieee80211_rate *rate;
	int i;

	hw = il_get_hw_mode(il, il->band);
	if (!hw) {
		IL_ERR("Failed to set rate: unable to get hw mode\n");
		return;
	}

	il->active_rate = 0;

	for (i = 0; i < hw->n_bitrates; i++) {
		rate = &(hw->bitrates[i]);
		if (rate->hw_value < RATE_COUNT_LEGACY)
			il->active_rate |= (1 << rate->hw_value);
	}

	D_RATE("Set active_rate = %0x\n", il->active_rate);

	il->staging.cck_basic_rates =
	    (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	il->staging.ofdm_basic_rates =
	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
}
EXPORT_SYMBOL(il_set_rate);

void
il_chswitch_done(struct il_priv *il, bool is_success)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		ieee80211_chswitch_done(il->vif, is_success, 0);
}
EXPORT_SYMBOL(il_chswitch_done);

void
il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_csa_notification *csa = &(pkt->u.csa_notif);
	struct il_rxon_cmd *rxon = (void *)&il->active;

	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		return;

	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
		rxon->channel = csa->channel;
		il->staging.channel = csa->channel;
		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
		il_chswitch_done(il, true);
	} else {
		IL_ERR("CSA notif (fail) : channel %d\n",
		       le16_to_cpu(csa->channel));
		il_chswitch_done(il, false);
	}
}
EXPORT_SYMBOL(il_hdl_csa);
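/*
 * Note (derived from the handler above): il_hdl_csa() only reacts while
 * S_CHANNEL_SWITCH_PENDING is set; on a successful notification both the
 * active and staging RXON channels are updated before completing the
 * switch via il_chswitch_done(), so a later il_commit_rxon() does not
 * see a stale channel.
 */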
#ifdef CONFIG_IWLEGACY_DEBUG
void
il_print_rx_config_cmd(struct il_priv *il)
{
	struct il_rxon_cmd *rxon = &il->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
#endif

/*
 * il_irq_handle_error - called for HW or SW error interrupt from card
 */
void
il_irq_handle_error(struct il_priv *il)
{
	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);

	/* Cancel currently queued command. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);

	il->ops->dump_nic_error_log(il);
	if (il->ops->dump_fh)
		il->ops->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il);
#endif

	wake_up(&il->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);

static int
_il_apm_stop_master(struct il_priv *il)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret =
	    _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IL_WARN("Master Disable Timed Out, 100 usec\n");

	D_INFO("stop master\n");

	return ret;
}

void
_il_apm_stop(struct il_priv *il)
{
	lockdep_assert_held(&il->reg_lock);

	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	_il_apm_stop_master(il);

	/* Reset the entire device */
	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(_il_apm_stop);

void
il_apm_stop(struct il_priv *il)
{
	unsigned long flags;

	spin_lock_irqsave(&il->reg_lock, flags);
	_il_apm_stop(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
EXPORT_SYMBOL(il_apm_stop);

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot or shutdown via il_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
int
il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI workaround) */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug workaround)
	 */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress workaround) */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is a no-op for 3945 (non-existent bit)
	 */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
	/*
	 * HW bug workaround for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so the device moves directly L0->L1;
	 * this costs a negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (il->cfg->set_l0s) {
		ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL,
						&lctl);
		if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) {
			/* L1-ASPM enabled; disable(!) L0S */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (il->cfg->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG, il->cfg->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. il_wr_prph()
	 * and accesses to uCode SRAM.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (il->cfg->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);

int
il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;

	lockdep_assert_held(&il->mutex);

	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->ops->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm means 1 milliwatt */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete and commit_rxon use the tx_power_next value,
	 * so it always needs to be updated with the newest request */
	il->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(S_SCANNING, &il->status) ||
	    memcmp(&il->active, &il->staging, sizeof(il->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->ops->send_tx_power(il);

	/* if setting tx_power fails, restore the original tx power */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);
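/*
 * Example (assumption): while a scan is in flight,
 *
 *	il_set_tx_power(il, 14, false);
 *
 * only records tx_power_next = 14 and returns 0; the new limit reaches
 * the firmware later, once scanning completes and the RXON is
 * committed, whereas force = true sends it immediately.
 */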
void
il_send_bt_config(struct il_priv *il)
{
	struct il_bt_cmd bt_cmd = {
		.lead_time = BT_LEAD_TIME_DEF,
		.max_kill = BT_MAX_KILL_DEF,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	if (!bt_coex_active)
		bt_cmd.flags = BT_COEX_DISABLE;
	else
		bt_cmd.flags = BT_COEX_ENABLE;

	D_INFO("BT coex %s\n",
	       (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");

	if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
		IL_ERR("failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(il_send_bt_config);

int
il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
{
	struct il_stats_cmd stats_cmd = {
		.configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
	};

	if (flags & CMD_ASYNC)
		return il_send_cmd_pdu_async(il, C_STATS,
					     sizeof(struct il_stats_cmd),
					     &stats_cmd, NULL);
	else
		return il_send_cmd_pdu(il, C_STATS,
				       sizeof(struct il_stats_cmd),
				       &stats_cmd);
}
EXPORT_SYMBOL(il_send_stats_request);

void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);

void
il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
	D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
		il_get_cmd_string(pkt->hdr.cmd));
	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(il_hdl_pm_debug_stats);

void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);

void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}

int
il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       unsigned int link_id, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	int q;

	D_MAC80211("enter\n");

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&il->lock, flags);

	il->qos_data.def_qos_parm.ac[q].cw_min =
	    cpu_to_le16(params->cw_min);
	il->qos_data.def_qos_parm.ac[q].cw_max =
	    cpu_to_le16(params->cw_max);
	il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	il->qos_data.def_qos_parm.ac[q].edca_txop =
	    cpu_to_le16((params->txop * 32));

	il->qos_data.def_qos_parm.ac[q].reserved1 = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	D_MAC80211("leave\n");
	return 0;
}
EXPORT_SYMBOL(il_mac_conf_tx);
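/*
 * Note (assumption): params->txop arrives from mac80211 in units of
 * 32 usec, so il_mac_conf_tx() stores edca_txop in plain microseconds;
 * e.g. txop = 94 yields 94 * 32 = 3008 usec, the 802.11 default TXOP
 * limit for the video access category in 11a/11g.
 */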
int
il_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	ret = (il->ibss_manager == IL_IBSS_MANAGER);

	D_MAC80211("leave ret %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);

static int
il_set_mode(struct il_priv *il)
{
	il_connection_init_rx_config(il);

	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	return il_commit_rxon(il);
}

int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	int err;
	bool reset;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * We do not support multiple virtual interfaces, but on hardware reset
	 * we have to add the same interface again.
	 */
	reset = (il->vif == vif);
	if (il->vif && !reset) {
		err = -EOPNOTSUPP;
		goto out;
	}

	il->vif = vif;
	il->iw_mode = vif->type;

	err = il_set_mode(il);
	if (err) {
		IL_WARN("Fail to set mode %d\n", vif->type);
		if (!reset) {
			il->vif = NULL;
			il->iw_mode = NL80211_IFTYPE_STATION;
		}
	}

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);
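/*
 * Note (restating the code above): because only a single virtual
 * interface is supported, a second mac80211 add_interface call with a
 * different vif is rejected with -EOPNOTSUPP, while re-adding the same
 * vif (the firmware-restart path) is accepted and simply re-runs
 * il_set_mode().
 */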
static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
	lockdep_assert_held(&il->mutex);

	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	il_set_mode(il);
}

void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	WARN_ON(il->vif != vif);
	il->vif = NULL;
	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
	il_teardown_interface(il, vif);
	eth_zero_addr(il->bssid);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_remove_interface);

int
il_alloc_txq_mem(struct il_priv *il)
{
	if (!il->txq)
		il->txq =
		    kcalloc(il->cfg->num_of_queues,
			    sizeof(struct il_tx_queue),
			    GFP_KERNEL);
	if (!il->txq) {
		IL_ERR("Not enough memory for txq\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(il_alloc_txq_mem);

void
il_free_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_free_txq_mem);

int
il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * If the request is external (e.g. from debugfs), always perform
	 * the reset regardless of the module parameter setting.
	 * If the request is internal (uCode error or driver-detected
	 * failure), the fw_restart module parameter must be checked
	 * before reloading the firmware.
	 */
	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit
	 */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
EXPORT_SYMBOL(il_force_reset);

int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	int err;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
		   vif->type, vif->addr, newtype, newp2p);

	if (newp2p) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!il->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* success */
	vif->type = newtype;
	vif->p2p = false;
	il->iw_mode = newtype;
	il_teardown_interface(il, vif);
	err = 0;

out:
	D_MAC80211("leave err %d\n", err);
	mutex_unlock(&il->mutex);

	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);

void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  u32 queues, bool drop)
{
	struct il_priv *il = hw->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	int i;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (il->txq == NULL)
		goto out;

	for (i = 0; i < il->hw_params.max_txq_num; i++) {
		struct il_queue *q;

		if (i == il->cmd_queue)
			continue;

		q = &il->txq[i].q;
		if (q->read_ptr == q->write_ptr)
			continue;

		if (time_after(jiffies, timeout)) {
			IL_ERR("Failed to flush queue %d\n", q->id);
			break;
		}

		msleep(20);
	}
out:
	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_flush);

/*
 * On every watchdog tick we check the (latest) time stamp. If it does
 * not change during the timeout period and the queue is not empty, we
 * reset the firmware.
 */
static int
il_check_stuck_queue(struct il_priv *il, int cnt)
{
	struct il_tx_queue *txq = &il->txq[cnt];
	struct il_queue *q = &txq->q;
	unsigned long timeout;
	unsigned long now = jiffies;
	int ret;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = now;
		return 0;
	}

	timeout = txq->time_stamp + msecs_to_jiffies(il->cfg->wd_timeout);

	if (time_after(now, timeout)) {
		IL_ERR("Queue %d stuck for %u ms.\n", q->id,
		       jiffies_to_msecs(now - txq->time_stamp));
		ret = il_force_reset(il, false);
		return (ret == -EAGAIN) ? 0 : 1;
	}

	return 0;
}

/*
 * Making the watchdog tick a quarter of the timeout ensures we will
 * discover a hung queue between timeout and 1.25 * timeout.
 */
#define IL_WD_TICK(timeout) ((timeout) / 4)
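/*
 * Worked example (assumption): with wd_timeout = 2000 ms the watchdog
 * runs every IL_WD_TICK(2000) = 500 ms, so a queue whose time stamp
 * stopped advancing is caught after between 2000 and 2500 ms, i.e.
 * between timeout and 1.25 * timeout as noted above.
 */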
/*
 * Watchdog timer callback: we check each tx queue for a stuck
 * condition; if a queue is hung we reset the firmware. If everything
 * is fine we just rearm the timer.
 */
void
il_bg_watchdog(struct timer_list *t)
{
	struct il_priv *il = from_timer(il, t, watchdog);
	int cnt;
	unsigned long timeout;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	timeout = il->cfg->wd_timeout;
	if (timeout == 0)
		return;

	/* monitor and check for stuck cmd queue */
	if (il_check_stuck_queue(il, il->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		/* skip as we already checked the command queue */
		if (cnt == il->cmd_queue)
			continue;
		if (il_check_stuck_queue(il, cnt))
			return;
	}

	mod_timer(&il->watchdog,
		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(il_bg_watchdog);

void
il_setup_watchdog(struct il_priv *il)
{
	unsigned int timeout = il->cfg->wd_timeout;

	if (timeout)
		mod_timer(&il->watchdog,
			  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
	else
		del_timer(&il->watchdog);
}
EXPORT_SYMBOL(il_setup_watchdog);

/*
 * Extended beacon time format:
 * time in usec is packed into a 32-bit value in extended:internal format.
 * The extended part is the beacon count; the internal part is the time
 * in usec within one beacon interval.
 */
u32
il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) &
	       (il_beacon_time_mask_high(il,
					 il->hw_params.beacon_time_tsf_bits) >>
		il->hw_params.beacon_time_tsf_bits);
	rem = (usec % interval) &
	      il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);

	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(il_usecs_to_beacons);

/* base is usually what we get from the uCode with each received frame;
 * it is the same as the HW timer counter counting down.
 */
__le32
il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
		   u32 beacon_interval)
{
	u32 base_low = base &
	    il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon &
	    il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base &
		   il_beacon_time_mask_high(il,
					    il->hw_params.beacon_time_tsf_bits)) +
		  (addon &
		   il_beacon_time_mask_high(il,
					    il->hw_params.beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << il->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << il->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(il_add_beacon_time);
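/*
 * Worked example (assumption, taking beacon_time_tsf_bits = 22 and
 * TIME_UNIT = 1024 usec): for beacon_interval = 100 TU the interval is
 * 102400 usec, so il_usecs_to_beacons(il, 250000, 100) returns
 * (2 << 22) + 45200 -- two whole beacon intervals in the extended
 * (high) part and the 45200 usec remainder in the internal (low) part.
 */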
#ifdef CONFIG_PM_SLEEP

static int
il_pci_suspend(struct device *device)
{
	struct il_priv *il = dev_get_drvdata(device);

	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call il_mac_stop() from the mac80211 suspend
	 * function first, but since il_mac_stop() has no knowledge of who
	 * the caller is, it will not call apm_ops.stop() to stop the DMA
	 * operation. Call apm_ops.stop here to make sure the DMA is stopped.
	 */
	il_apm_stop(il);

	return 0;
}

static int
il_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il_enable_interrupts(il);

	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(S_RFKILL, &il->status);
	else
		clear_bit(S_RFKILL, &il->status);

	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);

	return 0;
}

SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
EXPORT_SYMBOL(il_pm_ops);

#endif /* CONFIG_PM_SLEEP */
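/*
 * Illustrative sketch (assumption; driver name hypothetical): a chip
 * driver wires these handlers up through its pci_driver so the PM core
 * calls them around suspend/resume, e.g.:
 *
 *	static struct pci_driver il4965_driver = {
 *		...
 *		.driver.pm = &il_pm_ops,
 *	};
 */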
5043 */ 5044 if (il->ops->set_rxon_chain) 5045 il->ops->set_rxon_chain(il); 5046 } 5047 5048 /* during scanning mac80211 will delay channel setting until 5049 * scan finish with changed = 0 5050 */ 5051 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { 5052 5053 if (scan_active) 5054 goto set_ch_out; 5055 5056 ch = channel->hw_value; 5057 ch_info = il_get_channel_info(il, channel->band, ch); 5058 if (!il_is_channel_valid(ch_info)) { 5059 D_MAC80211("leave - invalid channel\n"); 5060 ret = -EINVAL; 5061 goto set_ch_out; 5062 } 5063 5064 if (il->iw_mode == NL80211_IFTYPE_ADHOC && 5065 !il_is_channel_ibss(ch_info)) { 5066 D_MAC80211("leave - not IBSS channel\n"); 5067 ret = -EINVAL; 5068 goto set_ch_out; 5069 } 5070 5071 spin_lock_irqsave(&il->lock, flags); 5072 5073 /* Configure HT40 channels */ 5074 if (il->ht.enabled != conf_is_ht(conf)) { 5075 il->ht.enabled = conf_is_ht(conf); 5076 ht_changed = true; 5077 } 5078 if (il->ht.enabled) { 5079 if (conf_is_ht40_minus(conf)) { 5080 il->ht.extension_chan_offset = 5081 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 5082 il->ht.is_40mhz = true; 5083 } else if (conf_is_ht40_plus(conf)) { 5084 il->ht.extension_chan_offset = 5085 IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 5086 il->ht.is_40mhz = true; 5087 } else { 5088 il->ht.extension_chan_offset = 5089 IEEE80211_HT_PARAM_CHA_SEC_NONE; 5090 il->ht.is_40mhz = false; 5091 } 5092 } else 5093 il->ht.is_40mhz = false; 5094 5095 /* 5096 * Default to no protection. Protection mode will 5097 * later be set from BSS config in il_ht_conf 5098 */ 5099 il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; 5100 5101 /* if we are switching from ht to 2.4 clear flags 5102 * from any ht related info since 2.4 does not 5103 * support ht */ 5104 if ((le16_to_cpu(il->staging.channel) != ch)) 5105 il->staging.flags = 0; 5106 5107 il_set_rxon_channel(il, channel); 5108 il_set_rxon_ht(il, ht_conf); 5109 5110 il_set_flags_for_band(il, channel->band, il->vif); 5111 5112 spin_unlock_irqrestore(&il->lock, flags); 5113 5114 if (il->ops->update_bcast_stations) 5115 ret = il->ops->update_bcast_stations(il); 5116 5117 set_ch_out: 5118 /* The list of supported rates and rate mask can be different 5119 * for each band; since the band may have changed, reset 5120 * the rate mask to what mac80211 lists */ 5121 il_set_rate(il); 5122 } 5123 5124 if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) { 5125 il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS); 5126 if (!il->power_data.ps_disabled) 5127 IL_WARN_ONCE("Enabling power save might cause firmware crashes\n"); 5128 ret = il_power_update_mode(il, false); 5129 if (ret) 5130 D_MAC80211("Error setting sleep level\n"); 5131 } 5132 5133 if (changed & IEEE80211_CONF_CHANGE_POWER) { 5134 D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt, 5135 conf->power_level); 5136 5137 il_set_tx_power(il, conf->power_level, false); 5138 } 5139 5140 if (!il_is_ready(il)) { 5141 D_MAC80211("leave - not ready\n"); 5142 goto out; 5143 } 5144 5145 if (scan_active) 5146 goto out; 5147 5148 if (memcmp(&il->active, &il->staging, sizeof(il->staging))) 5149 il_commit_rxon(il); 5150 else 5151 D_INFO("Not re-sending same RXON configuration.\n"); 5152 if (ht_changed) 5153 il_update_qos(il); 5154 5155 out: 5156 D_MAC80211("leave ret %d\n", ret); 5157 mutex_unlock(&il->mutex); 5158 5159 return ret; 5160 } 5161 EXPORT_SYMBOL(il_mac_config); 5162 5163 void 5164 il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 5165 { 5166 struct il_priv *il = hw->priv; 5167 unsigned 
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;

	mutex_lock(&il->mutex);
	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	spin_lock_irqsave(&il->lock, flags);

	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));

	/* a new association gets rid of the IBSS beacon skb */
	dev_consume_skb_irq(il->beacon_skb);
	il->beacon_skb = NULL;
	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting the association process */
	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il);

	il_set_rate(il);

	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
}
EXPORT_SYMBOL(il_mac_reset_tsf);

static void
il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;

	D_ASSOC("enter:\n");

	if (!il->ht.enabled)
		return;

	il->ht.protection =
	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	il->ht.non_gf_sta_present =
	    !!(bss_conf->ht_operation_mode &
	       IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap =
			    &sta->deflink.ht_cap;
			int maxstreams;

			maxstreams =
			    (ht_cap->mcs.tx_params &
			     IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) >>
			    IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if (ht_cap->mcs.rx_mask[1] == 0 &&
			    ht_cap->mcs.rx_mask[2] == 0)
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	D_ASSOC("leave\n");
}
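/*
 * Worked example (assumption): for a 2x2 AP, the MCS TX params field
 * encodes "up to 2 spatial streams" as 1, so maxstreams = 1 + 1 = 2,
 * and rx_mask[1] is non-zero; il_ht_conf() therefore leaves
 * single_chain_sufficient = false. A 1x1 peer (maxstreams == 1,
 * rx_mask[1] == rx_mask[2] == 0) flips it to true.
 */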
5246 */ 5247 ht_conf->single_chain_sufficient = true; 5248 } 5249 rcu_read_unlock(); 5250 break; 5251 case NL80211_IFTYPE_ADHOC: 5252 ht_conf->single_chain_sufficient = true; 5253 break; 5254 default: 5255 break; 5256 } 5257 5258 D_ASSOC("leave\n"); 5259 } 5260 5261 static inline void 5262 il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif) 5263 { 5264 /* 5265 * inform the ucode that there is no longer an 5266 * association and that no more packets should be 5267 * sent 5268 */ 5269 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 5270 il->staging.assoc_id = 0; 5271 il_commit_rxon(il); 5272 } 5273 5274 static void 5275 il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 5276 { 5277 struct il_priv *il = hw->priv; 5278 unsigned long flags; 5279 __le64 timestamp; 5280 struct sk_buff *skb = ieee80211_beacon_get(hw, vif, 0); 5281 5282 if (!skb) 5283 return; 5284 5285 D_MAC80211("enter\n"); 5286 5287 lockdep_assert_held(&il->mutex); 5288 5289 if (!il->beacon_enabled) { 5290 IL_ERR("update beacon with no beaconing enabled\n"); 5291 dev_kfree_skb(skb); 5292 return; 5293 } 5294 5295 spin_lock_irqsave(&il->lock, flags); 5296 dev_consume_skb_irq(il->beacon_skb); 5297 il->beacon_skb = skb; 5298 5299 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 5300 il->timestamp = le64_to_cpu(timestamp); 5301 5302 D_MAC80211("leave\n"); 5303 spin_unlock_irqrestore(&il->lock, flags); 5304 5305 if (!il_is_ready_rf(il)) { 5306 D_MAC80211("leave - RF not ready\n"); 5307 return; 5308 } 5309 5310 il->ops->post_associate(il); 5311 } 5312 5313 void 5314 il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 5315 struct ieee80211_bss_conf *bss_conf, u64 changes) 5316 { 5317 struct il_priv *il = hw->priv; 5318 int ret; 5319 5320 mutex_lock(&il->mutex); 5321 D_MAC80211("enter: changes 0x%llx\n", changes); 5322 5323 if (!il_is_alive(il)) { 5324 D_MAC80211("leave - not alive\n"); 5325 mutex_unlock(&il->mutex); 5326 return; 5327 } 5328 5329 if (changes & BSS_CHANGED_QOS) { 5330 unsigned long flags; 5331 5332 spin_lock_irqsave(&il->lock, flags); 5333 il->qos_data.qos_active = bss_conf->qos; 5334 il_update_qos(il); 5335 spin_unlock_irqrestore(&il->lock, flags); 5336 } 5337 5338 if (changes & BSS_CHANGED_BEACON_ENABLED) { 5339 /* FIXME: can we remove beacon_enabled ? */ 5340 if (vif->bss_conf.enable_beacon) 5341 il->beacon_enabled = true; 5342 else 5343 il->beacon_enabled = false; 5344 } 5345 5346 if (changes & BSS_CHANGED_BSSID) { 5347 D_MAC80211("BSSID %pM\n", bss_conf->bssid); 5348 5349 /* 5350 * On passive channel we wait with blocked queues to see if 5351 * there is traffic on that channel. If no frame will be 5352 * received (what is very unlikely since scan detects AP on 5353 * that channel, but theoretically possible), mac80211 associate 5354 * procedure will time out and mac80211 will call us with NULL 5355 * bssid. We have to unblock queues on such condition. 5356 */ 5357 if (is_zero_ether_addr(bss_conf->bssid)) 5358 il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE); 5359 5360 /* 5361 * If there is currently a HW scan going on in the background, 5362 * then we need to cancel it, otherwise sometimes we are not 5363 * able to authenticate (FIXME: why ?) 
5364 */ 5365 if (il_scan_cancel_timeout(il, 100)) { 5366 D_MAC80211("leave - scan abort failed\n"); 5367 mutex_unlock(&il->mutex); 5368 return; 5369 } 5370 5371 /* mac80211 only sets assoc when in STATION mode */ 5372 memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); 5373 5374 /* FIXME: currently needed in a few places */ 5375 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); 5376 } 5377 5378 /* 5379 * This needs to be after setting the BSSID in case 5380 * mac80211 decides to do both changes at once because 5381 * it will invoke post_associate. 5382 */ 5383 if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON)) 5384 il_beacon_update(hw, vif); 5385 5386 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 5387 D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble); 5388 if (bss_conf->use_short_preamble) 5389 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 5390 else 5391 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 5392 } 5393 5394 if (changes & BSS_CHANGED_ERP_CTS_PROT) { 5395 D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); 5396 if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ) 5397 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; 5398 else 5399 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 5400 if (bss_conf->use_cts_prot) 5401 il->staging.flags |= RXON_FLG_SELF_CTS_EN; 5402 else 5403 il->staging.flags &= ~RXON_FLG_SELF_CTS_EN; 5404 } 5405 5406 if (changes & BSS_CHANGED_BASIC_RATES) { 5407 /* XXX use this information 5408 * 5409 * To do that, remove code from il_set_rate() and put something 5410 * like this here: 5411 * 5412 if (A-band) 5413 il->staging.ofdm_basic_rates = 5414 bss_conf->basic_rates; 5415 else 5416 il->staging.ofdm_basic_rates = 5417 bss_conf->basic_rates >> 4; 5418 il->staging.cck_basic_rates = 5419 bss_conf->basic_rates & 0xF; 5420 */ 5421 } 5422 5423 if (changes & BSS_CHANGED_HT) { 5424 il_ht_conf(il, vif); 5425 5426 if (il->ops->set_rxon_chain) 5427 il->ops->set_rxon_chain(il); 5428 } 5429 5430 if (changes & BSS_CHANGED_ASSOC) { 5431 D_MAC80211("ASSOC %d\n", vif->cfg.assoc); 5432 if (vif->cfg.assoc) { 5433 il->timestamp = bss_conf->sync_tsf; 5434 5435 if (!il_is_rfkill(il)) 5436 il->ops->post_associate(il); 5437 } else 5438 il_set_no_assoc(il, vif); 5439 } 5440 5441 if (changes && il_is_associated(il) && vif->cfg.aid) { 5442 D_MAC80211("Changes (%#llx) while associated\n", changes); 5443 ret = il_send_rxon_assoc(il); 5444 if (!ret) { 5445 /* Sync active_rxon with latest change. */ 5446 memcpy((void *)&il->active, &il->staging, 5447 sizeof(struct il_rxon_cmd)); 5448 } 5449 } 5450 5451 if (changes & BSS_CHANGED_BEACON_ENABLED) { 5452 if (vif->bss_conf.enable_beacon) { 5453 memcpy(il->staging.bssid_addr, bss_conf->bssid, 5454 ETH_ALEN); 5455 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); 5456 il->ops->config_ap(il); 5457 } else 5458 il_set_no_assoc(il, vif); 5459 } 5460 5461 if (changes & BSS_CHANGED_IBSS) { 5462 ret = il->ops->manage_ibss_station(il, vif, 5463 vif->cfg.ibss_joined); 5464 if (ret) 5465 IL_ERR("failed to %s IBSS station %pM\n", 5466 vif->cfg.ibss_joined ? "add" : "remove", 5467 bss_conf->bssid); 5468 } 5469 5470 D_MAC80211("leave\n"); 5471 mutex_unlock(&il->mutex); 5472 } 5473 EXPORT_SYMBOL(il_mac_bss_info_changed); 5474 5475 irqreturn_t 5476 il_isr(int irq, void *data) 5477 { 5478 struct il_priv *il = data; 5479 u32 inta, inta_mask; 5480 u32 inta_fh; 5481 unsigned long flags; 5482 if (!il) 5483 return IRQ_NONE; 5484 5485 spin_lock_irqsave(&il->lock, flags); 5486 5487 /* Disable (but don't clear!) 
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;

	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* Re-enable interrupts here since we don't have anything to service. */
	/* Only re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);

/*
 * il_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
 * function.
 */
void
il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
		     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

		if (!ieee80211_is_mgmt(fc))
			return;

		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			*tx_flags |= TX_CMD_FLG_CTS_MSK;
			break;
		}
	} else if (info->control.rates[0].flags &
		   IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
	}
}
EXPORT_SYMBOL(il_tx_cmd_protection);
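/*
 * Example (derived from the code above): a frame whose first rate entry
 * sets IEEE80211_TX_RC_USE_RTS_CTS gets RTS plus FULL_TXOP_PROT; but
 * for auth, deauth, assoc-req and reassoc-req management frames the RTS
 * bit is traded for CTS-to-self. With IEEE80211_TX_RC_USE_CTS_PROTECT,
 * CTS-to-self plus FULL_TXOP_PROT is used directly.
 */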