/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

#include "common.h"

int
_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
{
	const int interval = 10;	/* microseconds */
	int t = 0;

	do {
		if ((_il_rd(il, addr) & mask) == (bits & mask))
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(_il_poll_bit);

void
il_set_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_set_bit);

void
il_clear_bit(struct il_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_il_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_clear_bit);

bool
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		val = _il_rd(il, CSR_GP_CNTRL);
		WARN_ONCE(1, "Timeout waiting for ucode processor access "
			     "(CSR_GP_CNTRL 0x%08x)\n", val);
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(_il_grab_nic_access);
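/*
 * Typical access pattern (a sketch; it mirrors il_rd_prph()/il_wr_prph()
 * below): hold reg_lock across the whole grab/access/release sequence so
 * the device cannot go back to sleep between the individual accesses:
 *
 *	spin_lock_irqsave(&il->reg_lock, reg_flags);
 *	if (likely(_il_grab_nic_access(il))) {
 *		val = _il_rd_prph(il, reg);
 *		_il_release_nic_access(il);
 *	}
 *	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
 */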
int
il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
{
	const int interval = 10;	/* microseconds */
	int t = 0;

	do {
		if ((il_rd(il, addr) & mask) == mask)
			return t;
		udelay(interval);
		t += interval;
	} while (t < timeout);

	return -ETIMEDOUT;
}
EXPORT_SYMBOL(il_poll_bit);

u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}
EXPORT_SYMBOL(il_rd_prph);

void
il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		_il_wr_prph(il, addr, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_wr_prph);

u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);

	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}
EXPORT_SYMBOL(il_read_targ_mem);

void
il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (likely(_il_grab_nic_access(il))) {
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
		_il_wr(il, HBUS_TARG_MEM_WDAT, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
EXPORT_SYMBOL(il_write_targ_mem);

const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
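/*
 * Note: this assumes the usual definition of IL_CMD() from common.h,
 * i.e. "#define IL_CMD(x) case x: return #x", so each entry above maps a
 * host-command ID to its printable name via token stringification.
 */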
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";
	}
}
EXPORT_SYMBOL(il_get_cmd_string);
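/*
 * Completion handshake for il_send_cmd_sync() below: S_HCMD_ACTIVE is
 * cleared (and il->wait_command_queue woken) by the command-response Rx
 * path elsewhere in the driver when the answer for this command arrives,
 * so wait_event_timeout() returns either on that wakeup or after
 * HOST_COMPLETE_TIMEOUT = HZ / 2, i.e. 500 ms independent of CONFIG_HZ.
 */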
381 */ 382 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; 383 } 384 fail: 385 if (cmd->reply_page) { 386 il_free_pages(il, cmd->reply_page); 387 cmd->reply_page = 0; 388 } 389 out: 390 return ret; 391 } 392 EXPORT_SYMBOL(il_send_cmd_sync); 393 394 int 395 il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd) 396 { 397 if (cmd->flags & CMD_ASYNC) 398 return il_send_cmd_async(il, cmd); 399 400 return il_send_cmd_sync(il, cmd); 401 } 402 EXPORT_SYMBOL(il_send_cmd); 403 404 int 405 il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data) 406 { 407 struct il_host_cmd cmd = { 408 .id = id, 409 .len = len, 410 .data = data, 411 }; 412 413 return il_send_cmd_sync(il, &cmd); 414 } 415 EXPORT_SYMBOL(il_send_cmd_pdu); 416 417 int 418 il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, 419 void (*callback) (struct il_priv *il, 420 struct il_device_cmd *cmd, 421 struct il_rx_pkt *pkt)) 422 { 423 struct il_host_cmd cmd = { 424 .id = id, 425 .len = len, 426 .data = data, 427 }; 428 429 cmd.flags |= CMD_ASYNC; 430 cmd.callback = callback; 431 432 return il_send_cmd_async(il, &cmd); 433 } 434 EXPORT_SYMBOL(il_send_cmd_pdu_async); 435 436 /* default: IL_LED_BLINK(0) using blinking idx table */ 437 static int led_mode; 438 module_param(led_mode, int, S_IRUGO); 439 MODULE_PARM_DESC(led_mode, 440 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); 441 442 /* Throughput OFF time(ms) ON time (ms) 443 * >300 25 25 444 * >200 to 300 40 40 445 * >100 to 200 55 55 446 * >70 to 100 65 65 447 * >50 to 70 75 75 448 * >20 to 50 85 85 449 * >10 to 20 95 95 450 * >5 to 10 110 110 451 * >1 to 5 130 130 452 * >0 to 1 167 167 453 * <=0 SOLID ON 454 */ 455 static const struct ieee80211_tpt_blink il_blink[] = { 456 {.throughput = 0, .blink_time = 334}, 457 {.throughput = 1 * 1024 - 1, .blink_time = 260}, 458 {.throughput = 5 * 1024 - 1, .blink_time = 220}, 459 {.throughput = 10 * 1024 - 1, .blink_time = 190}, 460 {.throughput = 20 * 1024 - 1, .blink_time = 170}, 461 {.throughput = 50 * 1024 - 1, .blink_time = 150}, 462 {.throughput = 70 * 1024 - 1, .blink_time = 130}, 463 {.throughput = 100 * 1024 - 1, .blink_time = 110}, 464 {.throughput = 200 * 1024 - 1, .blink_time = 80}, 465 {.throughput = 300 * 1024 - 1, .blink_time = 50}, 466 }; 467 468 /* 469 * Adjust led blink rate to compensate on a MAC Clock difference on every HW 470 * Led blink rate analysis showed an average deviation of 0% on 3945, 471 * 5% on 4965 HW. 
int
il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return il_send_cmd_async(il, cmd);

	return il_send_cmd_sync(il, cmd);
}
EXPORT_SYMBOL(il_send_cmd);

int
il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return il_send_cmd_sync(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu);

int
il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
		      void (*callback) (struct il_priv *il,
					struct il_device_cmd *cmd,
					struct il_rx_pkt *pkt))
{
	struct il_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	cmd.flags |= CMD_ASYNC;
	cmd.callback = callback;

	return il_send_cmd_async(il, &cmd);
}
EXPORT_SYMBOL(il_send_cmd_pdu_async);

/* default: IL_LED_BLINK(0) using blinking idx table */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode,
		 "0=system default, 1=On(RF On)/Off(RF Off), 2=blinking");

/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0					SOLID ON
 *
 * (blink_time in il_blink[] below is the ON plus OFF period,
 *  e.g. 25 + 25 = 50 for the top row.)
 */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0,		.blink_time = 334},
	{.throughput = 1 * 1024 - 1,	.blink_time = 260},
	{.throughput = 5 * 1024 - 1,	.blink_time = 220},
	{.throughput = 10 * 1024 - 1,	.blink_time = 190},
	{.throughput = 20 * 1024 - 1,	.blink_time = 170},
	{.throughput = 50 * 1024 - 1,	.blink_time = 150},
	{.throughput = 70 * 1024 - 1,	.blink_time = 130},
	{.throughput = 100 * 1024 - 1,	.blink_time = 110},
	{.throughput = 200 * 1024 - 1,	.blink_time = 80},
	{.throughput = 300 * 1024 - 1,	.blink_time = 50},
};

/*
 * Adjust LED blink rate to compensate for the MAC clock difference on
 * each piece of hardware.  LED blink-rate analysis showed an average
 * deviation of 0% on 3945 and 5% on 4965 HW.
 * The LED on/off time per HW must be compensated according to that
 * deviation to achieve the desired LED frequency.
 * The calculation is: (100 - averageDeviation) / 100 * blinkTime
 * For code efficiency the calculation will be:
 *	compensation = (100 - averageDeviation) * 64 / 100
 *	NewBlinkTime = (compensation * BlinkTime) / 64
 */
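/*
 * Worked example: with an average deviation of 5% (the 4965 case),
 * compensation = (100 - 5) * 64 / 100 = 60 in integer math, so a nominal
 * 260 ms blink becomes (260 * 60) >> 6 = 243 ms -- roughly the intended
 * 95% of nominal.  The compensation value itself arrives precomputed in
 * il->cfg->led_compensation.
 */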
static inline u8
il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
{
	if (!compensation) {
		IL_ERR("undefined blink compensation: "
		       "use pre-defined blinking time\n");
		return time;
	}

	return (u8) ((time * compensation) >> 6);
}

/* Set led pattern command */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n", il->cfg->led_compensation);
	led_cmd.on = il_blink_compensation(il, on, il->cfg->led_compensation);
	led_cmd.off = il_blink_compensation(il, off, il->cfg->led_compensation);

	ret = il->ops->send_led_cmd(il, &led_cmd);
	if (!ret) {
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}

static void
il_led_brightness_set(struct led_classdev *led_cdev,
		      enum led_brightness brightness)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
	unsigned long on = 0;

	if (brightness > 0)
		on = IL_LED_SOLID;

	il_led_cmd(il, on, 0);
}

static int
il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
		 unsigned long *delay_off)
{
	struct il_priv *il = container_of(led_cdev, struct il_priv, led);

	return il_led_cmd(il, *delay_on, *delay_off);
}

void
il_leds_init(struct il_priv *il)
{
	int mode = led_mode;
	int ret;

	if (mode == IL_LED_DEFAULT)
		mode = il->cfg->led_mode;

	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
	il->led.brightness_set = il_led_brightness_set;
	il->led.blink_set = il_led_blink_set;
	il->led.max_brightness = 1;

	switch (mode) {
	case IL_LED_DEFAULT:
		WARN_ON(1);
		break;
	case IL_LED_BLINK:
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
		break;
	case IL_LED_RF_STATE:
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
		break;
	}

	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
	if (ret) {
		kfree(il->led.name);
		return;
	}

	il->led_registered = true;
}
EXPORT_SYMBOL(il_leds_init);

void
il_leds_exit(struct il_priv *il)
{
	if (!il->led_registered)
		return;

	led_classdev_unregister(&il->led);
	kfree(il->led.name);
}
EXPORT_SYMBOL(il_leds_exit);

/************************** EEPROM BANDS ****************************
 *
 * The il_eeprom_band definitions below provide the mapping from the
 * EEPROM contents to the specific channel number supported for each
 * band.
 *
 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
 * The specific geography and calibration information for that channel
 * is contained in the eeprom map itself.
 *
 * During init, we copy the eeprom information and channel map
 * information into il->channel_info_24/52 and il->channel_map_24/52
 *
 * channel_map_24/52 provides the idx in the channel_info array for a
 * given channel.  We have to have two separate maps as there is channel
 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
 * band_2
 *
 * A value of 0xff stored in the channel_map indicates that the channel
 * is not supported by the hardware at all.
 *
 * A value of 0xfe in the channel_map indicates that the channel is not
 * valid for Tx with the current hardware.  This means that
 * while the system can tune and receive on a given channel, it may not
 * be able to associate or transmit any frames on that
 * channel.  There is no corresponding channel information for that
 * entry.
 *
 *********************************************************************/

/* 2.4 GHz */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};

/******************************************************************************
 *
 * EEPROM related functions
 *
 ******************************************************************************/

static int
il_eeprom_verify_signature(struct il_priv *il)
{
	u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
	int ret = 0;

	D_EEPROM("EEPROM signature=0x%08x\n", gp);
	switch (gp) {
	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
		break;
	default:
		IL_ERR("bad EEPROM signature, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		break;
	}
	return ret;
}

const u8 *
il_eeprom_query_addr(const struct il_priv *il, size_t offset)
{
	BUG_ON(offset >= il->cfg->eeprom_size);
	return &il->eeprom[offset];
}
EXPORT_SYMBOL(il_eeprom_query_addr);

u16
il_eeprom_query16(const struct il_priv *il, size_t offset)
{
	if (!il->eeprom)
		return 0;
	return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
}
EXPORT_SYMBOL(il_eeprom_query16);
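/*
 * Byte-order example: EEPROM words are stored little-endian, so with
 * il->eeprom[offset] == 0x34 and il->eeprom[offset + 1] == 0x12,
 * il_eeprom_query16() above returns 0x1234.
 */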
/**
 * il_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into il->eeprom
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = il->cfg->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom)
		return -ENOMEM;

	e = (__le16 *) il->eeprom;

	il->ops->apm_init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->ops->eeprom_acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->ops->eeprom_release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* Reset chip to save power until we load uCode during "up". */
	il_apm_stop(il);
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
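/*
 * Read-protocol sketch for the loop above: for each 16-bit word, the
 * (even) byte address is shifted left one bit and written to
 * CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK is polled (for up to
 * IL_EEPROM_ACCESS_TIMEOUT), and the word is then taken from the upper
 * 16 bits of the same register.  E.g. addr = 2 writes 4 to the register
 * and the returned word lands in e[1].
 */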
void
il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);

static void
il_init_band_reference(const struct il_priv *il, int eep_band,
		       int *eeprom_ch_count,
		       const struct il_eeprom_channel **eeprom_ch_info,
		       const u8 **eeprom_ch_idx)
{
	u32 offset = il->cfg->regulatory_bands[eep_band - 1];

	switch (eep_band) {
	case 1:		/* 2.4GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_1;
		break;
	case 2:		/* 4.9GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_2;
		break;
	case 3:		/* 5.2GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_3;
		break;
	case 4:		/* 5.5GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_4;
		break;
	case 5:		/* 5.7GHz band */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_5;
		break;
	case 6:		/* 2.4GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_6;
		break;
	case 7:		/* 5 GHz ht40 channels */
		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
		*eeprom_ch_info =
		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
								     offset);
		*eeprom_ch_idx = il_eeprom_band_7;
		break;
	default:
		BUG();
	}
}

#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/**
 * il_mod_ht40_chan_info - Copy HT40 channel info into driver's channel info.
 *
 * Does not set up a command, or touch hardware.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}
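/*
 * HT40 pairing example: for EEPROM band-7 entry 36, il_init_channel_map()
 * below calls il_mod_ht40_chan_info() twice -- once for the lower half
 * (channel 36, clearing IEEE80211_CHAN_NO_HT40PLUS) and once for the
 * upper half (channel 36 + 4 = 40, clearing IEEE80211_CHAN_NO_HT40MINUS)
 * -- so the 36/40 pair becomes usable as one HT40 channel.
 */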
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			      ? # x " " : "")

/**
 * il_init_channel_map - Set up driver's info for all possible channels
 */
int
il_init_channel_map(struct il_priv *il)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_idx = NULL;
	const struct il_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct il_channel_info *ch_info;

	if (il->channel_count) {
		D_EEPROM("Channel map already initialized.\n");
		return 0;
	}

	D_EEPROM("Initializing regulatory info from EEPROM\n");

	il->channel_count =
	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
	    ARRAY_SIZE(il_eeprom_band_5);

	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);

	il->channel_info =
	    kzalloc(sizeof(struct il_channel_info) * il->channel_count,
		    GFP_KERNEL);
	if (!il->channel_info) {
		IL_ERR("Could not allocate channel_info\n");
		il->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = il->channel_info;

	/* Loop through the 5 EEPROM bands, adding them in order to the
	 * channel map we maintain (which contains additional information
	 * beyond what is in the EEPROM) */
	for (band = 1; band <= 5; band++) {

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_idx[ch];
			ch_info->band =
			    (band == 1) ? NL80211_BAND_2GHZ :
			    NL80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 * and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then
			 * enable one by one */
			ch_info->ht40_extension_channel =
			    IEEE80211_CHAN_NO_HT40;

			if (!(il_is_channel_valid(ch_info))) {
				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
					 "No traffic\n", ch_info->channel,
					 ch_info->flags,
					 il_is_channel_a_band(ch_info) ?
					 "5.2" : "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			D_EEPROM("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):"
				 " Ad-Hoc %ssupported\n", ch_info->channel,
				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
				 CHECK_AND_PRINT_I(VALID),
				 CHECK_AND_PRINT_I(IBSS),
				 CHECK_AND_PRINT_I(ACTIVE),
				 CHECK_AND_PRINT_I(RADAR),
				 CHECK_AND_PRINT_I(WIDE),
				 CHECK_AND_PRINT_I(DFS),
				 eeprom_ch_info[ch].flags,
				 eeprom_ch_info[ch].max_power_avg,
				 ((eeprom_ch_info[ch].flags &
				   EEPROM_CHANNEL_IBSS) &&
				  !(eeprom_ch_info[ch].flags &
				    EEPROM_CHANNEL_RADAR)) ? "" : "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
	    il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum nl80211_band ieeeband;

		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband = (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(il_init_channel_map);

/*
 * il_free_channel_map - undo allocations in il_init_channel_map
 */
void
il_free_channel_map(struct il_priv *il)
{
	kfree(il->channel_info);
	il->channel_count = 0;
}
EXPORT_SYMBOL(il_free_channel_map);
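/*
 * Layout note (see il_get_channel_info() below): channel_count above is
 * 14 + 13 + 12 + 11 + 6 = 56 entries, and because band 1 is copied first,
 * idx 0..13 of il->channel_info are exactly 2.4 GHz channels 1..14; the
 * 5 GHz channels start at idx 14 and must be located by linear search.
 */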
/**
 * il_get_channel_info - Find driver's private channel info
 *
 * Based on band and channel number.
 */
const struct il_channel_info *
il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
		    u16 channel)
{
	int i;

	switch (band) {
	case NL80211_BAND_5GHZ:
		for (i = 14; i < il->channel_count; i++) {
			if (il->channel_info[i].channel == channel)
				return &il->channel_info[i];
		}
		break;
	case NL80211_BAND_2GHZ:
		if (channel >= 1 && channel <= 14)
			return &il->channel_info[channel - 1];
		break;
	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL(il_get_channel_info);

/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211.
 */

#define SLP_VEC(X0, X1, X2, X3, X4) { \
	cpu_to_le32(X0), \
	cpu_to_le32(X1), \
	cpu_to_le32(X2), \
	cpu_to_le32(X3), \
	cpu_to_le32(X4) \
}

static void
il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	const __le32 interval[3][IL_POWER_VEC_SIZE] = {
		SLP_VEC(2, 2, 4, 6, 0xFF),
		SLP_VEC(2, 4, 7, 10, 10),
		SLP_VEC(4, 7, 10, 10, 0xFF)
	};
	int i, dtim_period, no_dtim;
	u32 max_sleep;
	bool skip;

	memset(cmd, 0, sizeof(*cmd));

	if (il->power_data.pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	/* if no Power Save, we are done */
	if (il->power_data.ps_disabled)
		return;

	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	cmd->keep_alive_seconds = 0;
	cmd->debug_flags = 0;
	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
	cmd->keep_alive_beacons = 0;

	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;

	if (dtim_period <= 2) {
		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
		no_dtim = 2;
	} else if (dtim_period <= 10) {
		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
		no_dtim = 2;
	} else {
		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
		no_dtim = 0;
	}

	if (dtim_period == 0) {
		dtim_period = 1;
		skip = false;
	} else {
		skip = !!no_dtim;
	}

	if (skip) {
		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];

		max_sleep = le32_to_cpu(tmp);
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * (skip + 1);
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		max_sleep = dtim_period;
		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
}
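/*
 * Worked example for the function above: with dtim_period = 3, the middle
 * row {2, 4, 7, 10, 10} is chosen and skipping over DTIMs is allowed.
 * The last entry (10) is not 0xFF and exceeds the DTIM period, so it is
 * rounded down to a DTIM multiple, (10 / 3) * 3 = 9, and the vector is
 * clamped to {2, 4, 7, 9, 9} before the command is sent.
 */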
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}

static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* sleep_cmd_next is sent on scan complete, so keep it updated */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->ops->update_chain_flags && update_chains)
			il->ops->update_chain_flags(il);
		else if (il->ops->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}

int
il_power_update_mode(struct il_priv *il, bool force)
{
	struct il_powertable_cmd cmd;

	il_build_powertable_cmd(il, &cmd);

	return il_power_set_mode(il, &cmd, force);
}
EXPORT_SYMBOL(il_power_update_mode);
/* initialize to default */
void
il_power_initialize(struct il_priv *il)
{
	u16 lctl;

	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
	il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	il->power_data.debug_sleep_level_override = -1;

	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
}
EXPORT_SYMBOL(il_power_initialize);

/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
 * sending probe req.  This should be set long enough to hear probe responses
 * from more than one AP. */
#define IL_ACTIVE_DWELL_TIME_24 (30)	/* all times in msec */
#define IL_ACTIVE_DWELL_TIME_52 (20)

#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)

/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
 * Must be set longer than active dwell time.
 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
#define IL_PASSIVE_DWELL_TIME_24 (20)	/* all times in msec */
#define IL_PASSIVE_DWELL_TIME_52 (10)
#define IL_PASSIVE_DWELL_BASE (100)
#define IL_CHANNEL_TUNE_TIME 5

static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when device is not ready
	 * to receive the scan abort command or is not currently
	 * performing a hardware scan */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan, which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is complete. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	il_free_pages(il, cmd.reply_page);
	return ret;
}
static void
il_complete_scan(struct il_priv *il, bool aborted)
{
	/* check if scan was requested from mac80211 */
	if (il->scan_request) {
		D_SCAN("Complete scan in mac80211\n");
		ieee80211_scan_completed(il->hw, aborted);
	}

	il->scan_vif = NULL;
	il->scan_request = NULL;
}

void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
}

static void
il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully sent scan abort\n");
}

/**
 * il_scan_cancel - Cancel any currently executing HW scan
 */
int
il_scan_cancel(struct il_priv *il)
{
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
	return 0;
}
EXPORT_SYMBOL(il_scan_cancel);

/**
 * il_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 */
int
il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&il->mutex);

	D_SCAN("Scan cancel timeout\n");

	il_do_scan_abort(il);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(S_SCAN_HW, &il->status))
			break;
		msleep(20);
	}

	return test_bit(S_SCAN_HW, &il->status);
}
EXPORT_SYMBOL(il_scan_cancel_timeout);

/* Service response to C_SCAN (0x80) */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}

/* Service N_SCAN_START (0x82) */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;

	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: %d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}

/* Service N_SCAN_RESULTS (0x83) */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: %d [802.11%s] (TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}

/* Service N_SCAN_COMPLETE (0x84) */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	queue_work(il->workqueue, &il->scan_completed);
}

void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);

u16
il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
			 u8 n_probes)
{
	if (band == NL80211_BAND_5GHZ)
		return IL_ACTIVE_DWELL_TIME_52 +
		    IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
	else
		return IL_ACTIVE_DWELL_TIME_24 +
		    IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
EXPORT_SYMBOL(il_get_active_dwell_time);

u16
il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
			  struct ieee80211_vif *vif)
{
	u16 value;

	u16 passive =
	    (band == NL80211_BAND_2GHZ) ?
	    IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_24 :
	    IL_PASSIVE_DWELL_BASE + IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
void
il_init_scan_params(struct il_priv *il)
{
	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;

	if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
		il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
	if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
		il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);

static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->ops->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}

int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct il_priv *il = hw->priv;
	int ret;

	if (req->n_channels == 0) {
		IL_ERR("Can not scan on no channels.\n");
		return -EINVAL;
	}

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

out_unlock:
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);

static void
il_bg_scan_check(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, scan_check.work);

	D_SCAN("Scan check work\n");

	/* If we get here, the firmware did not finish the scan and is
	 * most likely in bad shape, so don't bother to send an abort
	 * command; just force scan-complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
}
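/*
 * Frame layout built by il_fill_probe_req() below (a sketch):
 *
 *	[24-byte mgmt header: PROBE_REQ, da/bssid = broadcast, sa = ta]
 *	[SSID IE: id 0, len 0 -- a zero-length SSID is the wildcard]
 *	[caller-supplied IEs, ie_len bytes]
 *
 * The function returns the total length written, or 0 if the header or
 * the SSID IE would not fit in 'left' bytes.
 */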
/**
 * il_fill_probe_req - fill in all required fields and IE for probe request
 */
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);

static void
il_bg_abort_scan(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, abort_scan);

	D_SCAN("Abort scan work\n");

	/* We keep the scan_check work queued in case the firmware does
	 * not report back a scan-completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
}

static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending;
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->ops->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
void
il_setup_scan_deferred_work(struct il_priv *il)
{
	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
}
EXPORT_SYMBOL(il_setup_scan_deferred_work);

void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);

/* il->sta_lock must be held */
static void
il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
{
	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
		       sta_id, il->stations[sta_id].sta.sta.addr);

	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
		D_ASSOC("STA id %u addr %pM already present"
			" in uCode (according to driver)\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	} else {
		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	}
}

static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device.  That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns.  This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}

static void
il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
		    struct il_rx_pkt *pkt)
{
	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;

	il_process_add_sta_resp(il, addsta, pkt, false);
}

int
il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
{
	struct il_rx_pkt *pkt = NULL;
	int ret = 0;
	u8 data[sizeof(*sta)];
	struct il_host_cmd cmd = {
		.id = C_ADD_STA,
		.flags = flags,
		.data = data,
	};
	u8 sta_id __maybe_unused = sta->sta.sta_id;

	D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
	       flags & CMD_ASYNC ? "a" : "");

	if (flags & CMD_ASYNC)
		cmd.callback = il_add_sta_callback;
	else {
		cmd.flags |= CMD_WANT_SKB;
		might_sleep();
	}

	cmd.len = il->ops->build_addsta_hcmd(sta, data);
	ret = il_send_cmd(il, &cmd);
	if (ret)
		return ret;
	if (flags & CMD_ASYNC)
		return 0;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	ret = il_process_add_sta_resp(il, sta, pkt, true);

	il_free_pages(il, cmd.reply_page);

	return ret;
}
EXPORT_SYMBOL(il_send_add_sta);

static void
il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
	__le32 sta_flags;

	if (!sta || !sta_ht_inf->ht_supported)
		goto done;

	D_ASSOC("spatial multiplexing power save mode: %s\n",
		(sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
		(sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
		"disabled");

	sta_flags = il->stations[idx].sta.station_flags;

	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_STATIC:
		sta_flags |= STA_FLG_MIMO_DIS_MSK;
		break;
	case IEEE80211_SMPS_DYNAMIC:
		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
		break;
	case IEEE80211_SMPS_OFF:
		break;
	default:
		IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
		break;
	}

	/* ampdu_factor is the 802.11n maximum A-MPDU length exponent:
	 * the peer may aggregate up to 2^(13 + factor) - 1 bytes,
	 * e.g. factor 3 allows 65535-byte A-MPDUs */
	sta_flags |=
	    cpu_to_le32((u32) sta_ht_inf->
			ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);

	/* ampdu_density encodes the minimum MPDU start spacing */
	sta_flags |=
	    cpu_to_le32((u32) sta_ht_inf->
			ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);

	if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
		sta_flags |= STA_FLG_HT40_EN_MSK;
	else
		sta_flags &= ~STA_FLG_HT40_EN_MSK;

	il->stations[idx].sta.station_flags = sta_flags;
done:
	return;
}

/**
 * il_prep_station - Prepare station information for addition
 *
 * should be called with sta_lock held
 */
u8
il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
		struct ieee80211_sta *sta)
{
	struct il_station_entry *station;
	int i;
	u8 sta_id = IL_INVALID_STATION;
	u16 rate;

	if (is_ap)
		sta_id = IL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		sta_id = il->hw_params.bcast_id;
	else
		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
			if (ether_addr_equal(il->stations[i].sta.sta.addr,
					     addr)) {
				sta_id = i;
				break;
			}

			if (!il->stations[i].used &&
			    sta_id == IL_INVALID_STATION)
				sta_id = i;
		}

	/*
	 * These two conditions have the same outcome, but keep them
	 * separate
	 */
	if (unlikely(sta_id == IL_INVALID_STATION))
		return sta_id;

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station.  Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		return sta_id;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		return sta_id;
	}

	station = &il->stations[sta_id];
	station->used = IL_STA_DRIVER_ACTIVE;
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
	il->num_stations++;

	/* Set up the C_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = sta_id;
	station->sta.station_flags = 0;

	/*
	 * OK to call unconditionally, since local stations (IBSS BSSID
	 * STA and broadcast STA) pass in a NULL sta, and mac80211
	 * doesn't allow HT IBSS.
	 */
	il_set_ht_add_station(il, sta_id, sta);

	/* 3945 only */
	rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);

	return sta_id;
}
EXPORT_SYMBOL_GPL(il_prep_station);
*/ 2008 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 2009 2010 return sta_id; 2011 2012 } 2013 EXPORT_SYMBOL_GPL(il_prep_station); 2014 2015 #define STA_WAIT_TIMEOUT (HZ/2) 2016 2017 /** 2018 * il_add_station_common - 2019 */ 2020 int 2021 il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap, 2022 struct ieee80211_sta *sta, u8 *sta_id_r) 2023 { 2024 unsigned long flags_spin; 2025 int ret = 0; 2026 u8 sta_id; 2027 struct il_addsta_cmd sta_cmd; 2028 2029 *sta_id_r = 0; 2030 spin_lock_irqsave(&il->sta_lock, flags_spin); 2031 sta_id = il_prep_station(il, addr, is_ap, sta); 2032 if (sta_id == IL_INVALID_STATION) { 2033 IL_ERR("Unable to prepare station %pM for addition\n", addr); 2034 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2035 return -EINVAL; 2036 } 2037 2038 /* 2039 * uCode is not able to deal with multiple requests to add a 2040 * station. Keep track if one is in progress so that we do not send 2041 * another. 2042 */ 2043 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { 2044 D_INFO("STA %d already in process of being added.\n", sta_id); 2045 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2046 return -EEXIST; 2047 } 2048 2049 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && 2050 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2051 D_ASSOC("STA %d (%pM) already added, not adding again.\n", 2052 sta_id, addr); 2053 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2054 return -EEXIST; 2055 } 2056 2057 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; 2058 memcpy(&sta_cmd, &il->stations[sta_id].sta, 2059 sizeof(struct il_addsta_cmd)); 2060 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2061 2062 /* Add station to device's station table */ 2063 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2064 if (ret) { 2065 spin_lock_irqsave(&il->sta_lock, flags_spin); 2066 IL_ERR("Adding station %pM failed.\n", 2067 il->stations[sta_id].sta.sta.addr); 2068 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2069 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; 2070 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2071 } 2072 *sta_id_r = sta_id; 2073 return ret; 2074 } 2075 EXPORT_SYMBOL(il_add_station_common); 2076 2077 /** 2078 * il_sta_ucode_deactivate - deactivate ucode status for a station 2079 * 2080 * il->sta_lock must be held 2081 */ 2082 static void 2083 il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id) 2084 { 2085 /* Ucode must be active and driver must be non active */ 2086 if ((il->stations[sta_id]. 
2087 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != 2088 IL_STA_UCODE_ACTIVE) 2089 IL_ERR("removed non active STA %u\n", sta_id); 2090 2091 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; 2092 2093 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); 2094 D_ASSOC("Removed STA %u\n", sta_id); 2095 } 2096 2097 static int 2098 il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, 2099 bool temporary) 2100 { 2101 struct il_rx_pkt *pkt; 2102 int ret; 2103 2104 unsigned long flags_spin; 2105 struct il_rem_sta_cmd rm_sta_cmd; 2106 2107 struct il_host_cmd cmd = { 2108 .id = C_REM_STA, 2109 .len = sizeof(struct il_rem_sta_cmd), 2110 .flags = CMD_SYNC, 2111 .data = &rm_sta_cmd, 2112 }; 2113 2114 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 2115 rm_sta_cmd.num_sta = 1; 2116 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); 2117 2118 cmd.flags |= CMD_WANT_SKB; 2119 2120 ret = il_send_cmd(il, &cmd); 2121 2122 if (ret) 2123 return ret; 2124 2125 pkt = (struct il_rx_pkt *)cmd.reply_page; 2126 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 2127 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags); 2128 ret = -EIO; 2129 } 2130 2131 if (!ret) { 2132 switch (pkt->u.rem_sta.status) { 2133 case REM_STA_SUCCESS_MSK: 2134 if (!temporary) { 2135 spin_lock_irqsave(&il->sta_lock, flags_spin); 2136 il_sta_ucode_deactivate(il, sta_id); 2137 spin_unlock_irqrestore(&il->sta_lock, 2138 flags_spin); 2139 } 2140 D_ASSOC("C_REM_STA PASSED\n"); 2141 break; 2142 default: 2143 ret = -EIO; 2144 IL_ERR("C_REM_STA failed\n"); 2145 break; 2146 } 2147 } 2148 il_free_pages(il, cmd.reply_page); 2149 2150 return ret; 2151 } 2152 2153 /** 2154 * il_remove_station - Remove driver's knowledge of station. 2155 */ 2156 int 2157 il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) 2158 { 2159 unsigned long flags; 2160 2161 if (!il_is_ready(il)) { 2162 D_INFO("Unable to remove station %pM, device not ready.\n", 2163 addr); 2164 /* 2165 * It is typical for stations to be removed when we are 2166 * going down. Return success since device will be down 2167 * soon anyway 2168 */ 2169 return 0; 2170 } 2171 2172 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr); 2173 2174 if (WARN_ON(sta_id == IL_INVALID_STATION)) 2175 return -EINVAL; 2176 2177 spin_lock_irqsave(&il->sta_lock, flags); 2178 2179 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { 2180 D_INFO("Removing %pM but non DRIVER active\n", addr); 2181 goto out_err; 2182 } 2183 2184 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { 2185 D_INFO("Removing %pM but non UCODE active\n", addr); 2186 goto out_err; 2187 } 2188 2189 if (il->stations[sta_id].used & IL_STA_LOCAL) { 2190 kfree(il->stations[sta_id].lq); 2191 il->stations[sta_id].lq = NULL; 2192 } 2193 2194 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; 2195 2196 il->num_stations--; 2197 2198 BUG_ON(il->num_stations < 0); 2199 2200 spin_unlock_irqrestore(&il->sta_lock, flags); 2201 2202 return il_send_remove_station(il, addr, sta_id, false); 2203 out_err: 2204 spin_unlock_irqrestore(&il->sta_lock, flags); 2205 return -EINVAL; 2206 } 2207 EXPORT_SYMBOL_GPL(il_remove_station); 2208 2209 /** 2210 * il_clear_ucode_stations - clear ucode station table bits 2211 * 2212 * This function clears all the bits in the driver indicating 2213 * which stations are active in the ucode. Call when something 2214 * other than explicit station management would cause this in 2215 * the ucode, e.g. unassociated RXON. 
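 */

/*
 * Editor's note: the station "used" field handled above is a small bitmask
 * state machine. A sketch of the states a normal entry moves through (flag
 * names are the ones used in this file; the transitions are a summary of the
 * surrounding functions, not new API):
 */
#if 0
u8 used = 0;

used |= IL_STA_DRIVER_ACTIVE;		/* il_prep_station()          */
used |= IL_STA_UCODE_INPROGRESS;	/* C_ADD_STA queued           */
used |= IL_STA_UCODE_ACTIVE;		/* C_ADD_STA response OK      */
used &= ~IL_STA_UCODE_INPROGRESS;	/* add completed              */
used &= ~IL_STA_DRIVER_ACTIVE;		/* il_remove_station()        */
used &= ~IL_STA_UCODE_ACTIVE;		/* C_REM_STA success          */
#endif

/*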
2216 */ 2217 void 2218 il_clear_ucode_stations(struct il_priv *il) 2219 { 2220 int i; 2221 unsigned long flags_spin; 2222 bool cleared = false; 2223 2224 D_INFO("Clearing ucode stations in driver\n"); 2225 2226 spin_lock_irqsave(&il->sta_lock, flags_spin); 2227 for (i = 0; i < il->hw_params.max_stations; i++) { 2228 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { 2229 D_INFO("Clearing ucode active for station %d\n", i); 2230 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2231 cleared = true; 2232 } 2233 } 2234 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2235 2236 if (!cleared) 2237 D_INFO("No active stations found to be cleared\n"); 2238 } 2239 EXPORT_SYMBOL(il_clear_ucode_stations); 2240 2241 /** 2242 * il_restore_stations() - Restore driver known stations to device 2243 * 2244 * All stations considered active by the driver, but not present in the 2245 * ucode, are restored. 2246 * 2247 * Function sleeps. 2248 */ 2249 void 2250 il_restore_stations(struct il_priv *il) 2251 { 2252 struct il_addsta_cmd sta_cmd; 2253 struct il_link_quality_cmd lq; 2254 unsigned long flags_spin; 2255 int i; 2256 bool found = false; 2257 int ret; 2258 bool send_lq; 2259 2260 if (!il_is_ready(il)) { 2261 D_INFO("Not ready yet, not restoring any stations.\n"); 2262 return; 2263 } 2264 2265 D_ASSOC("Restoring all known stations ... start.\n"); 2266 spin_lock_irqsave(&il->sta_lock, flags_spin); 2267 for (i = 0; i < il->hw_params.max_stations; i++) { 2268 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && 2269 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { 2270 D_ASSOC("Restoring sta %pM\n", 2271 il->stations[i].sta.sta.addr); 2272 il->stations[i].sta.mode = 0; 2273 il->stations[i].used |= IL_STA_UCODE_INPROGRESS; 2274 found = true; 2275 } 2276 } 2277 2278 for (i = 0; i < il->hw_params.max_stations; i++) { 2279 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { 2280 memcpy(&sta_cmd, &il->stations[i].sta, 2281 sizeof(struct il_addsta_cmd)); 2282 send_lq = false; 2283 if (il->stations[i].lq) { 2284 memcpy(&lq, il->stations[i].lq, 2285 sizeof(struct il_link_quality_cmd)); 2286 send_lq = true; 2287 } 2288 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2289 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); 2290 if (ret) { 2291 spin_lock_irqsave(&il->sta_lock, flags_spin); 2292 IL_ERR("Adding station %pM failed.\n", 2293 il->stations[i].sta.sta.addr); 2294 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; 2295 il->stations[i].used &= 2296 ~IL_STA_UCODE_INPROGRESS; 2297 spin_unlock_irqrestore(&il->sta_lock, 2298 flags_spin); 2299 } 2300 /* 2301 * Rate scaling has already been initialized, send 2302 * current LQ command 2303 */ 2304 if (send_lq) 2305 il_send_lq_cmd(il, &lq, CMD_SYNC, true); 2306 spin_lock_irqsave(&il->sta_lock, flags_spin); 2307 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; 2308 } 2309 } 2310 2311 spin_unlock_irqrestore(&il->sta_lock, flags_spin); 2312 if (!found) 2313 D_INFO("Restoring all known stations" 2314 " .... no stations to be restored.\n"); 2315 else 2316 D_INFO("Restoring all known stations" " .... complete.\n"); 2317 } 2318 EXPORT_SYMBOL(il_restore_stations);
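/*
 * Editor's note: il_restore_stations() above illustrates the locking pattern
 * used throughout this file: snapshot the station entry into a stack copy
 * under sta_lock, drop the lock, and only then issue the (sleeping)
 * synchronous host command. A minimal sketch of the pattern, using only the
 * API shown in this file:
 */
#if 0
static int il_send_snapshot(struct il_priv *il, int idx)
{
	struct il_addsta_cmd cmd;	/* stack copy, safe to use unlocked */
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	memcpy(&cmd, &il->stations[idx].sta, sizeof(cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* May sleep; must not be called with sta_lock held. */
	return il_send_add_sta(il, &cmd, CMD_SYNC);
}
#endif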
complete.\n"); 2317 } 2318 EXPORT_SYMBOL(il_restore_stations); 2319 2320 int 2321 il_get_free_ucode_key_idx(struct il_priv *il) 2322 { 2323 int i; 2324 2325 for (i = 0; i < il->sta_key_max_num; i++) 2326 if (!test_and_set_bit(i, &il->ucode_key_table)) 2327 return i; 2328 2329 return WEP_INVALID_OFFSET; 2330 } 2331 EXPORT_SYMBOL(il_get_free_ucode_key_idx); 2332 2333 void 2334 il_dealloc_bcast_stations(struct il_priv *il) 2335 { 2336 unsigned long flags; 2337 int i; 2338 2339 spin_lock_irqsave(&il->sta_lock, flags); 2340 for (i = 0; i < il->hw_params.max_stations; i++) { 2341 if (!(il->stations[i].used & IL_STA_BCAST)) 2342 continue; 2343 2344 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; 2345 il->num_stations--; 2346 BUG_ON(il->num_stations < 0); 2347 kfree(il->stations[i].lq); 2348 il->stations[i].lq = NULL; 2349 } 2350 spin_unlock_irqrestore(&il->sta_lock, flags); 2351 } 2352 EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); 2353 2354 #ifdef CONFIG_IWLEGACY_DEBUG 2355 static void 2356 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2357 { 2358 int i; 2359 D_RATE("lq station id 0x%x\n", lq->sta_id); 2360 D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk, 2361 lq->general_params.dual_stream_ant_msk); 2362 2363 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 2364 D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags); 2365 } 2366 #else 2367 static inline void 2368 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) 2369 { 2370 } 2371 #endif 2372 2373 /** 2374 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity 2375 * 2376 * It sometimes happens when a HT rate has been in use and we 2377 * loose connectivity with AP then mac80211 will first tell us that the 2378 * current channel is not HT anymore before removing the station. In such a 2379 * scenario the RXON flags will be updated to indicate we are not 2380 * communicating HT anymore, but the LQ command may still contain HT rates. 2381 * Test for this to prevent driver from sending LQ command between the time 2382 * RXON flags are updated and when LQ command is updated. 2383 */ 2384 static bool 2385 il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq) 2386 { 2387 int i; 2388 2389 if (il->ht.enabled) 2390 return true; 2391 2392 D_INFO("Channel %u is not an HT channel\n", il->active.channel); 2393 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 2394 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { 2395 D_INFO("idx %d of LQ expects HT channel\n", i); 2396 return false; 2397 } 2398 } 2399 return true; 2400 } 2401 2402 /** 2403 * il_send_lq_cmd() - Send link quality command 2404 * @init: This command is sent as part of station initialization right 2405 * after station has been added. 2406 * 2407 * The link quality command is sent as the last step of station creation. 2408 * This is the special case in which init is set and we call a callback in 2409 * this case to clear the state indicating that station creation is in 2410 * progress. 
2459 int 2460 il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2461 struct ieee80211_sta *sta) 2462 { 2463 struct il_priv *il = hw->priv; 2464 struct il_station_priv_common *sta_common = (void *)sta->drv_priv; 2465 int ret; 2466 2467 mutex_lock(&il->mutex); 2468 D_MAC80211("enter station %pM\n", sta->addr); 2469 2470 ret = il_remove_station(il, sta_common->sta_id, sta->addr); 2471 if (ret) 2472 IL_ERR("Error removing station %pM\n", sta->addr); 2473 2474 D_MAC80211("leave ret %d\n", ret); 2475 mutex_unlock(&il->mutex); 2476 2477 return ret; 2478 } 2479 EXPORT_SYMBOL(il_mac_sta_remove); 2480 2481 /************************** RX-FUNCTIONS ****************************/ 2482 /* 2483 * Rx theory of operation 2484 * 2485 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 2486 * each of which points to Receive Buffers to be filled by the NIC. These get 2487 * used not only for Rx frames, but for any command response or notification 2488 * from the NIC. The driver and NIC manage the Rx buffers by means 2489 * of idxes into the circular buffer. 2490 * 2491 * Rx Queue Indexes 2492 * The host/firmware share two idx registers for managing the Rx buffers. 2493 * 2494 * The READ idx maps to the first position that the firmware may be writing 2495 * to -- the driver can read up to (but not including) this position and get 2496 * good data. 2497 * The READ idx is managed by the firmware once the card is enabled. 2498 * 2499 * The WRITE idx maps to the last position the driver has read from -- the 2500 * position preceding WRITE is the last slot in which the firmware can place 2501 * a packet. 2502 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 2503 * WRITE = READ. 2504 * 2505 * During initialization, the host sets up the READ queue position to the first 2506 * IDX position, and WRITE to the last (READ - 1, wrapped). 2507 * 2508 * When the firmware places a packet in a buffer, it will advance the READ idx 2509 * and fire the RX interrupt. The driver can then query the READ idx and 2510 * process as many packets as possible, moving the WRITE idx forward as it 2511 * resets the Rx queue buffers with new memory.
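 */

/*
 * Editor's note: a worked example of the idx arithmetic above, assuming the
 * usual RX_QUEUE_SIZE of 256. This mirrors il_rx_queue_space() further down.
 */
#if 0
static int rx_slots_free(u32 read, u32 write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;		/* reserve so full is never mistaken for empty */
	return s < 0 ? 0 : s;
}
/* read = 5, write = 4:   s = 1    -> 0 free after the 2-slot reserve   */
/* read = 5, write = 250: s = -245 -> 11 raw -> 9 free after the reserve */
#endif

/*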
2512 * 2513 * The management in the driver is as follows: 2514 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 2515 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 2516 * to replenish the iwl->rxq->rx_free. 2517 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the 2518 * iwl->rxq is replenished and the READ IDX is updated (updating the 2519 * 'processed' and 'read' driver idxes as well) 2520 * + A received packet is processed and handed to the kernel network stack, 2521 * detached from the iwl->rxq. The driver 'processed' idx is updated. 2522 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free 2523 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ 2524 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there 2525 * were enough free buffers and RX_STALLED is set it is cleared. 2526 * 2527 * 2528 * Driver sequence: 2529 * 2530 * il_rx_queue_alloc() Allocates rx_free 2531 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls 2532 * il_rx_queue_restock 2533 * il_rx_queue_restock() Moves available buffers from rx_free into Rx 2534 * queue, updates firmware pointers, and updates 2535 * the WRITE idx. If insufficient rx_free buffers 2536 * are available, schedules il_rx_replenish 2537 * 2538 * -- enable interrupts -- 2539 * ISR - il_rx() Detach il_rx_bufs from pool up to the 2540 * READ IDX, detaching the SKB from the pool. 2541 * Moves the packet buffer from queue to rx_used. 2542 * Calls il_rx_queue_restock to refill any empty 2543 * slots. 2544 * ... 2545 * 2546 */ 2547 2548 /** 2549 * il_rx_queue_space - Return number of free slots available in queue. 2550 */ 2551 int 2552 il_rx_queue_space(const struct il_rx_queue *q) 2553 { 2554 int s = q->read - q->write; 2555 if (s <= 0) 2556 s += RX_QUEUE_SIZE; 2557 /* keep some buffer to not confuse full and empty queue */ 2558 s -= 2; 2559 if (s < 0) 2560 s = 0; 2561 return s; 2562 } 2563 EXPORT_SYMBOL(il_rx_queue_space); 2564 2565 /** 2566 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue 2567 */ 2568 void 2569 il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) 2570 { 2571 unsigned long flags; 2572 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; 2573 u32 reg; 2574 2575 spin_lock_irqsave(&q->lock, flags); 2576 2577 if (q->need_update == 0) 2578 goto exit_unlock; 2579 2580 /* If power-saving is in use, make sure device is awake */ 2581 if (test_bit(S_POWER_PMI, &il->status)) { 2582 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2583 2584 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2585 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n", 2586 reg); 2587 il_set_bit(il, CSR_GP_CNTRL, 2588 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2589 goto exit_unlock; 2590 } 2591 2592 q->write_actual = (q->write & ~0x7); 2593 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2594 2595 /* Else device is assumed to be awake */ 2596 } else { 2597 /* Device expects a multiple of 8 */ 2598 q->write_actual = (q->write & ~0x7); 2599 il_wr(il, rx_wrt_ptr_reg, q->write_actual); 2600 } 2601 2602 q->need_update = 0; 2603 2604 exit_unlock: 2605 spin_unlock_irqrestore(&q->lock, flags); 2606 } 2607 EXPORT_SYMBOL(il_rx_queue_update_write_ptr); 2608 2609 int 2610 il_rx_queue_alloc(struct il_priv *il) 2611 { 2612 struct il_rx_queue *rxq = &il->rxq; 2613 struct device *dev = &il->pci_dev->dev; 2614 int i; 2615 2616 spin_lock_init(&rxq->lock); 2617 INIT_LIST_HEAD(&rxq->rx_free); 2618 INIT_LIST_HEAD(&rxq->rx_used); 
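/*
 * Editor's note: the rx_free/rx_used interplay described in the comment
 * above, in brief -- buffers start on rx_used, are given fresh pages and
 * moved to rx_free by the replenish work, and the restock step hands them to
 * the hardware. The low-watermark check that schedules the replenish looks
 * roughly like this; RX_LOW_WATERMARK and the rx_replenish work item are
 * assumed to be the usual iwlegacy definitions from common.h:
 */
#if 0
if (rxq->free_count <= RX_LOW_WATERMARK)
	queue_work(il->workqueue, &il->rx_replenish);
#endif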
2619 2620 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 2621 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, 2622 GFP_KERNEL); 2623 if (!rxq->bd) 2624 goto err_bd; 2625 2626 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status), 2627 &rxq->rb_stts_dma, GFP_KERNEL); 2628 if (!rxq->rb_stts) 2629 goto err_rb; 2630 2631 /* Fill the rx_used queue with _all_ of the Rx buffers */ 2632 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 2633 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 2634 2635 /* Set us so that we have processed and used all buffers, but have 2636 * not restocked the Rx queue with fresh buffers */ 2637 rxq->read = rxq->write = 0; 2638 rxq->write_actual = 0; 2639 rxq->free_count = 0; 2640 rxq->need_update = 0; 2641 return 0; 2642 2643 err_rb: 2644 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 2645 rxq->bd_dma); 2646 err_bd: 2647 return -ENOMEM; 2648 } 2649 EXPORT_SYMBOL(il_rx_queue_alloc); 2650 2651 void 2652 il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb) 2653 { 2654 struct il_rx_pkt *pkt = rxb_addr(rxb); 2655 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); 2656 2657 if (!report->state) { 2658 D_11H("Spectrum Measure Notification: Start\n"); 2659 return; 2660 } 2661 2662 memcpy(&il->measure_report, report, sizeof(*report)); 2663 il->measurement_status |= MEASUREMENT_READY; 2664 } 2665 EXPORT_SYMBOL(il_hdl_spectrum_measurement); 2666 2667 /* 2668 * returns non-zero if packet should be dropped 2669 */ 2670 int 2671 il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, 2672 u32 decrypt_res, struct ieee80211_rx_status *stats) 2673 { 2674 u16 fc = le16_to_cpu(hdr->frame_control); 2675 2676 /* 2677 * All contexts have the same setting here due to it being 2678 * a module parameter, so OK to check any context. 2679 */ 2680 if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) 2681 return 0; 2682 2683 if (!(fc & IEEE80211_FCTL_PROTECTED)) 2684 return 0; 2685 2686 D_RX("decrypt_res:0x%x\n", decrypt_res); 2687 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { 2688 case RX_RES_STATUS_SEC_TYPE_TKIP: 2689 /* The uCode has a bad phase 1 key for TKIP, so it pushes the 2690 * packet up; decryption will be done in SW. */ 2691 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2692 RX_RES_STATUS_BAD_KEY_TTAK) 2693 break; 2694 /* fall through */ 2695 case RX_RES_STATUS_SEC_TYPE_WEP: 2696 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2697 RX_RES_STATUS_BAD_ICV_MIC) { 2698 /* bad ICV, the packet is destroyed since the 2699 * decryption is in place, drop it */ 2700 D_RX("Packet destroyed\n"); 2701 return -1; 2702 } /* fall through */ 2703 case RX_RES_STATUS_SEC_TYPE_CCMP: 2704 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2705 RX_RES_STATUS_DECRYPT_OK) { 2706 D_RX("hw decrypt successfully!!!\n"); 2707 stats->flag |= RX_FLAG_DECRYPTED; 2708 } 2709 break; 2710 2711 default: 2712 break; 2713 } 2714 return 0; 2715 } 2716 EXPORT_SYMBOL(il_set_decrypted_flag); 2717 2718 /** 2719 * il_txq_update_write_ptr - Send new write idx to hardware 2720 */ 2721 void 2722 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) 2723 { 2724 u32 reg = 0; 2725 int txq_id = txq->q.id; 2726 2727 if (txq->need_update == 0) 2728 return; 2729 2730 /* if we're trying to save power */ 2731 if (test_bit(S_POWER_PMI, &il->status)) { 2732 /* wake up nic if it's powered down ... 2733 * uCode will wake up, and interrupt us again, so next 2734 * time we'll skip this part.
*/ 2735 reg = _il_rd(il, CSR_UCODE_DRV_GP1); 2736 2737 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 2738 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n", 2739 txq_id, reg); 2740 il_set_bit(il, CSR_GP_CNTRL, 2741 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2742 return; 2743 } 2744 2745 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2746 2747 /* 2748 * else not in power-save mode, 2749 * uCode will never sleep when we're 2750 * trying to tx (during RFKILL, we're not trying to tx). 2751 */ 2752 } else 2753 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); 2754 txq->need_update = 0; 2755 } 2756 EXPORT_SYMBOL(il_txq_update_write_ptr); 2757 2758 /** 2759 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's 2760 */ 2761 void 2762 il_tx_queue_unmap(struct il_priv *il, int txq_id) 2763 { 2764 struct il_tx_queue *txq = &il->txq[txq_id]; 2765 struct il_queue *q = &txq->q; 2766 2767 if (q->n_bd == 0) 2768 return; 2769 2770 while (q->write_ptr != q->read_ptr) { 2771 il->ops->txq_free_tfd(il, txq); 2772 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2773 } 2774 } 2775 EXPORT_SYMBOL(il_tx_queue_unmap); 2776 2777 /** 2778 * il_tx_queue_free - Deallocate DMA queue. 2779 * @txq: Transmit queue to deallocate. 2780 * 2781 * Empty queue by removing and destroying all BD's. 2782 * Free all buffers. 2783 * 0-fill, but do not free "txq" descriptor structure. 2784 */ 2785 void 2786 il_tx_queue_free(struct il_priv *il, int txq_id) 2787 { 2788 struct il_tx_queue *txq = &il->txq[txq_id]; 2789 struct device *dev = &il->pci_dev->dev; 2790 int i; 2791 2792 il_tx_queue_unmap(il, txq_id); 2793 2794 /* De-alloc array of command/tx buffers */ 2795 if (txq->cmd) { 2796 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 2797 kfree(txq->cmd[i]); 2798 } 2799 2800 /* De-alloc circular buffer of TFDs */ 2801 if (txq->q.n_bd) 2802 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2803 txq->tfds, txq->q.dma_addr); 2804 2805 /* De-alloc array of per-TFD driver data */ 2806 kfree(txq->skbs); 2807 txq->skbs = NULL; 2808 2809 /* deallocate arrays */ 2810 kfree(txq->cmd); 2811 kfree(txq->meta); 2812 txq->cmd = NULL; 2813 txq->meta = NULL; 2814 2815 /* 0-fill queue descriptor structure */ 2816 memset(txq, 0, sizeof(*txq)); 2817 } 2818 EXPORT_SYMBOL(il_tx_queue_free); 2819 2820 /** 2821 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue 2822 */ 2823 void 2824 il_cmd_queue_unmap(struct il_priv *il) 2825 { 2826 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2827 struct il_queue *q = &txq->q; 2828 int i; 2829 2830 if (q->n_bd == 0) 2831 return; 2832 2833 while (q->read_ptr != q->write_ptr) { 2834 i = il_get_cmd_idx(q, q->read_ptr, 0); 2835 2836 if (txq->meta[i].flags & CMD_MAPPED) { 2837 pci_unmap_single(il->pci_dev, 2838 dma_unmap_addr(&txq->meta[i], mapping), 2839 dma_unmap_len(&txq->meta[i], len), 2840 PCI_DMA_BIDIRECTIONAL); 2841 txq->meta[i].flags = 0; 2842 } 2843 2844 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd); 2845 } 2846 2847 i = q->n_win; 2848 if (txq->meta[i].flags & CMD_MAPPED) { 2849 pci_unmap_single(il->pci_dev, 2850 dma_unmap_addr(&txq->meta[i], mapping), 2851 dma_unmap_len(&txq->meta[i], len), 2852 PCI_DMA_BIDIRECTIONAL); 2853 txq->meta[i].flags = 0; 2854 } 2855 } 2856 EXPORT_SYMBOL(il_cmd_queue_unmap); 2857 2858 /** 2859 * il_cmd_queue_free - Deallocate DMA queue. 2860 * @txq: Transmit queue to deallocate. 2861 * 2862 * Empty queue by removing and destroying all BD's. 2863 * Free all buffers. 
2864 * 0-fill, but do not free "txq" descriptor structure. 2865 */ 2866 void 2867 il_cmd_queue_free(struct il_priv *il) 2868 { 2869 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 2870 struct device *dev = &il->pci_dev->dev; 2871 int i; 2872 2873 il_cmd_queue_unmap(il); 2874 2875 /* De-alloc array of command/tx buffers */ 2876 if (txq->cmd) { 2877 for (i = 0; i <= TFD_CMD_SLOTS; i++) 2878 kfree(txq->cmd[i]); 2879 } 2880 2881 /* De-alloc circular buffer of TFDs */ 2882 if (txq->q.n_bd) 2883 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd, 2884 txq->tfds, txq->q.dma_addr); 2885 2886 /* deallocate arrays */ 2887 kfree(txq->cmd); 2888 kfree(txq->meta); 2889 txq->cmd = NULL; 2890 txq->meta = NULL; 2891 2892 /* 0-fill queue descriptor structure */ 2893 memset(txq, 0, sizeof(*txq)); 2894 } 2895 EXPORT_SYMBOL(il_cmd_queue_free); 2896 2897 /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 2898 * DMA services 2899 * 2900 * Theory of operation 2901 * 2902 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 2903 * of buffer descriptors, each of which points to one or more data buffers for 2904 * the device to read from or fill. Driver and device exchange status of each 2905 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 2906 * entries in each circular buffer, to protect against confusing empty and full 2907 * queue states. 2908 * 2909 * The device reads or writes the data in the queues via the device's several 2910 * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 2911 * 2912 * For the Tx queue, there are low mark and high mark limits. If, after queuing 2913 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped. 2914 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes 2915 * > high mark, the Tx queue is resumed. 2916 * 2917 * See more detailed info in 4965.h. 2918 ***************************************************/ 2919 2920 int 2921 il_queue_space(const struct il_queue *q) 2922 { 2923 int s = q->read_ptr - q->write_ptr; 2924 2925 if (q->read_ptr > q->write_ptr) 2926 s -= q->n_bd; 2927 2928 if (s <= 0) 2929 s += q->n_win; 2930 /* keep some reserve to not confuse empty and full situations */ 2931 s -= 2; 2932 if (s < 0) 2933 s = 0; 2934 return s; 2935 } 2936 EXPORT_SYMBOL(il_queue_space); 2937 2938 2939 /** 2940 * il_queue_init - Initialize queue's high/low-water and read/write idxes 2941 */ 2942 static int 2943 il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id) 2944 { 2945 /* 2946 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 2947 * il_queue_inc_wrap and il_queue_dec_wrap are broken. 2948 */ 2949 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 2950 /* FIXME: remove q->n_bd */ 2951 q->n_bd = TFD_QUEUE_SIZE_MAX; 2952 2953 q->n_win = slots; 2954 q->id = id; 2955 2956 /* slots must be a power-of-two size, otherwise 2957 * il_get_cmd_idx is broken. */ 2958 BUG_ON(!is_power_of_2(slots)); 2959 2960 q->low_mark = q->n_win / 4; 2961 if (q->low_mark < 4) 2962 q->low_mark = 4; 2963 2964 q->high_mark = q->n_win / 8; 2965 if (q->high_mark < 2) 2966 q->high_mark = 2; 2967 2968 q->write_ptr = q->read_ptr = 0; 2969 2970 return 0; 2971 }
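/*
 * Editor's note: the power-of-two requirement enforced above exists because
 * index wrapping in this driver is done with a mask rather than a modulo.
 * A minimal sketch of the idiom (il_queue_inc_wrap() itself lives in
 * common.h; the exact definition here is illustrative):
 */
#if 0
static inline int queue_inc_wrap(int idx, int n_bd)
{
	/* only correct when n_bd is a power of two */
	return ++idx & (n_bd - 1);
}
/* n_bd = 256: idx 254 -> 255, idx 255 -> 0 (wraps without a division) */
#endif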
2973 /** 2974 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue 2975 */ 2976 static int 2977 il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) 2978 { 2979 struct device *dev = &il->pci_dev->dev; 2980 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 2981 2982 /* Driver private data, only for Tx (not command) queues, 2983 * not shared with device. */ 2984 if (id != il->cmd_queue) { 2985 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, 2986 sizeof(struct sk_buff *), 2987 GFP_KERNEL); 2988 if (!txq->skbs) { 2989 IL_ERR("Fail to alloc skbs\n"); 2990 goto error; 2991 } 2992 } else 2993 txq->skbs = NULL; 2994 2995 /* Circular buffer of transmit frame descriptors (TFDs), 2996 * shared with device */ 2997 txq->tfds = 2998 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); 2999 if (!txq->tfds) 3000 goto error; 3001 3002 txq->q.id = id; 3003 3004 return 0; 3005 3006 error: 3007 kfree(txq->skbs); 3008 txq->skbs = NULL; 3009 3010 return -ENOMEM; 3011 } 3012 3013 /** 3014 * il_tx_queue_init - Allocate and initialize one tx/cmd queue 3015 */ 3016 int 3017 il_tx_queue_init(struct il_priv *il, u32 txq_id) 3018 { 3019 int i, len, ret; 3020 int slots, actual_slots; 3021 struct il_tx_queue *txq = &il->txq[txq_id]; 3022 3023 /* 3024 * Alloc buffer array for commands (Tx or other types of commands). 3025 * For the command queue (#4/#9), allocate command space + one big 3026 * command for scan, since the scan command is very large; the system 3027 * will not have two scans at the same time, so only one is needed. 3028 * For normal Tx queues (all other queues), no super-size command 3029 * space is needed. 3030 */ 3031 if (txq_id == il->cmd_queue) { 3032 slots = TFD_CMD_SLOTS; 3033 actual_slots = slots + 1; 3034 } else { 3035 slots = TFD_TX_CMD_SLOTS; 3036 actual_slots = slots; 3037 } 3038 3039 txq->meta = 3040 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL); 3041 txq->cmd = 3042 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL); 3043 3044 if (!txq->meta || !txq->cmd) 3045 goto out_free_arrays; 3046 3047 len = sizeof(struct il_device_cmd); 3048 for (i = 0; i < actual_slots; i++) { 3049 /* only happens for cmd queue */ 3050 if (i == slots) 3051 len = IL_MAX_CMD_SIZE; 3052 3053 txq->cmd[i] = kmalloc(len, GFP_KERNEL); 3054 if (!txq->cmd[i]) 3055 goto err; 3056 } 3057 3058 /* Alloc driver data array and TFD circular buffer */ 3059 ret = il_tx_queue_alloc(il, txq, txq_id); 3060 if (ret) 3061 goto err; 3062 3063 txq->need_update = 0; 3064 3065 /* 3066 * For the default queues 0-3, set up the swq_id 3067 * already -- all others need to get one later 3068 * (if they need one at all). */ 3069 3070 if (txq_id < 4) 3071 il_set_swq_id(txq, txq_id, txq_id); 3072 3073 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3074 il_queue_init(il, &txq->q, slots, txq_id); 3075 3076 /* Tell device where to find queue */ 3077 il->ops->txq_init(il, txq); 3078 3079 return 0; 3080 err: 3081 for (i = 0; i < actual_slots; i++) 3082 kfree(txq->cmd[i]); 3083 out_free_arrays: 3084 kfree(txq->meta); 3085 txq->meta = NULL; 3086 kfree(txq->cmd); 3087 txq->cmd = NULL; 3088 3089 return -ENOMEM; 3090 } 3091 EXPORT_SYMBOL(il_tx_queue_init);
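/*
 * Editor's note: the command-queue slot layout described above, reduced to a
 * small sizing sketch. TFD_CMD_SLOTS, TFD_TX_CMD_SLOTS, IL_MAX_CMD_SIZE and
 * struct il_device_cmd are the driver's own definitions; only the helper
 * itself is illustrative.
 */
#if 0
static size_t cmd_pool_bytes(bool is_cmd_queue)
{
	size_t n = is_cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
	size_t bytes = n * sizeof(struct il_device_cmd);

	if (is_cmd_queue)	/* one extra huge slot for the scan command */
		bytes += IL_MAX_CMD_SIZE;
	return bytes;
}
#endif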
3069 */ 3070 if (txq_id < 4) 3071 il_set_swq_id(txq, txq_id, txq_id); 3072 3073 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3074 il_queue_init(il, &txq->q, slots, txq_id); 3075 3076 /* Tell device where to find queue */ 3077 il->ops->txq_init(il, txq); 3078 3079 return 0; 3080 err: 3081 for (i = 0; i < actual_slots; i++) 3082 kfree(txq->cmd[i]); 3083 out_free_arrays: 3084 kfree(txq->meta); 3085 txq->meta = NULL; 3086 kfree(txq->cmd); 3087 txq->cmd = NULL; 3088 3089 return -ENOMEM; 3090 } 3091 EXPORT_SYMBOL(il_tx_queue_init); 3092 3093 void 3094 il_tx_queue_reset(struct il_priv *il, u32 txq_id) 3095 { 3096 int slots, actual_slots; 3097 struct il_tx_queue *txq = &il->txq[txq_id]; 3098 3099 if (txq_id == il->cmd_queue) { 3100 slots = TFD_CMD_SLOTS; 3101 actual_slots = TFD_CMD_SLOTS + 1; 3102 } else { 3103 slots = TFD_TX_CMD_SLOTS; 3104 actual_slots = TFD_TX_CMD_SLOTS; 3105 } 3106 3107 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); 3108 txq->need_update = 0; 3109 3110 /* Initialize queue's high/low-water marks, and head/tail idxes */ 3111 il_queue_init(il, &txq->q, slots, txq_id); 3112 3113 /* Tell device where to find queue */ 3114 il->ops->txq_init(il, txq); 3115 } 3116 EXPORT_SYMBOL(il_tx_queue_reset); 3117 3118 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 3119 3120 /** 3121 * il_enqueue_hcmd - enqueue a uCode command 3122 * @il: device ilate data point 3123 * @cmd: a point to the ucode command structure 3124 * 3125 * The function returns < 0 values to indicate the operation is 3126 * failed. On success, it turns the idx (> 0) of command in the 3127 * command queue. 3128 */ 3129 int 3130 il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) 3131 { 3132 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3133 struct il_queue *q = &txq->q; 3134 struct il_device_cmd *out_cmd; 3135 struct il_cmd_meta *out_meta; 3136 dma_addr_t phys_addr; 3137 unsigned long flags; 3138 int len; 3139 u32 idx; 3140 u16 fix_size; 3141 3142 cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len); 3143 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); 3144 3145 /* If any of the command structures end up being larger than 3146 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then 3147 * we will need to increase the size of the TFD entries 3148 * Also, check to see if command buffer should not exceed the size 3149 * of device_cmd and max_cmd_size. */ 3150 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 3151 !(cmd->flags & CMD_SIZE_HUGE)); 3152 BUG_ON(fix_size > IL_MAX_CMD_SIZE); 3153 3154 if (il_is_rfkill(il) || il_is_ctkill(il)) { 3155 IL_WARN("Not sending command - %s KILL\n", 3156 il_is_rfkill(il) ? "RF" : "CT"); 3157 return -EIO; 3158 } 3159 3160 spin_lock_irqsave(&il->hcmd_lock, flags); 3161 3162 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { 3163 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3164 3165 IL_ERR("Restarting adapter due to command queue full\n"); 3166 queue_work(il->workqueue, &il->restart); 3167 return -ENOSPC; 3168 } 3169 3170 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 3171 out_cmd = txq->cmd[idx]; 3172 out_meta = &txq->meta[idx]; 3173 3174 if (WARN_ON(out_meta->flags & CMD_MAPPED)) { 3175 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3176 return -ENOSPC; 3177 } 3178 3179 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to 0 */ 3180 out_meta->flags = cmd->flags | CMD_MAPPED; 3181 if (cmd->flags & CMD_WANT_SKB) 3182 out_meta->source = cmd; 3183 if (cmd->flags & CMD_ASYNC) 3184 out_meta->callback = cmd->callback; 3185 3186 out_cmd->hdr.cmd = cmd->id; 3187 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); 3188 3189 /* At this point, the out_cmd now has all of the incoming cmd 3190 * information */ 3191 3192 out_cmd->hdr.flags = 0; 3193 out_cmd->hdr.sequence = 3194 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr)); 3195 if (cmd->flags & CMD_SIZE_HUGE) 3196 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 3197 len = sizeof(struct il_device_cmd); 3198 if (idx == TFD_CMD_SLOTS) 3199 len = IL_MAX_CMD_SIZE; 3200 3201 #ifdef CONFIG_IWLEGACY_DEBUG 3202 switch (out_cmd->hdr.cmd) { 3203 case C_TX_LINK_QUALITY_CMD: 3204 case C_SENSITIVITY: 3205 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, " 3206 "%d bytes at %d[%d]:%d\n", 3207 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, 3208 le16_to_cpu(out_cmd->hdr.sequence), fix_size, 3209 q->write_ptr, idx, il->cmd_queue); 3210 break; 3211 default: 3212 D_HC("Sending command %s (#%x), seq: 0x%04X, " 3213 "%d bytes at %d[%d]:%d\n", 3214 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, 3215 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr, 3216 idx, il->cmd_queue); 3217 } 3218 #endif 3219 3220 phys_addr = 3221 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, 3222 PCI_DMA_BIDIRECTIONAL); 3223 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) { 3224 idx = -ENOMEM; 3225 goto out; 3226 } 3227 dma_unmap_addr_set(out_meta, mapping, phys_addr); 3228 dma_unmap_len_set(out_meta, len, fix_size); 3229 3230 txq->need_update = 1; 3231 3232 if (il->ops->txq_update_byte_cnt_tbl) 3233 /* Set up entry in queue's byte count circular buffer */ 3234 il->ops->txq_update_byte_cnt_tbl(il, txq, 0); 3235 3236 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1, 3237 U32_PAD(cmd->len)); 3238 3239 /* Increment and update queue's write idx */ 3240 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); 3241 il_txq_update_write_ptr(il, txq); 3242 3243 out: 3244 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3245 return idx; 3246 }
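/*
 * Editor's note: the sequence field written above is the round-trip key for
 * command completion: il_enqueue_hcmd() packs (queue, idx) into hdr.sequence
 * with QUEUE_TO_SEQ()/IDX_TO_SEQ(), and il_tx_cmd_complete() below unpacks
 * it with SEQ_TO_QUEUE()/SEQ_TO_IDX(). A sketch of the round trip:
 */
#if 0
u16 seq = QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr);

WARN_ON(SEQ_TO_QUEUE(seq) != il->cmd_queue);
WARN_ON(SEQ_TO_IDX(seq) != q->write_ptr);
#endif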
3248 /** 3249 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 3250 * 3251 * When FW advances 'R' idx, all entries between old and new 'R' idx 3252 * need to be reclaimed. As a result, some free space becomes available. If 3253 * there is enough free space (> low mark), wake the stack that feeds us. 3254 */ 3255 static void 3256 il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx) 3257 { 3258 struct il_tx_queue *txq = &il->txq[txq_id]; 3259 struct il_queue *q = &txq->q; 3260 int nfreed = 0; 3261 3262 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { 3263 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " 3264 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd, 3265 q->write_ptr, q->read_ptr); 3266 return; 3267 } 3268 3269 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 3270 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { 3271 3272 if (nfreed++ > 0) { 3273 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx, 3274 q->write_ptr, q->read_ptr); 3275 queue_work(il->workqueue, &il->restart); 3276 } 3277 3278 } 3279 } 3280 3281 /** 3282 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them 3283 * @rxb: Rx buffer to reclaim 3284 * 3285 * If an Rx buffer has an async callback associated with it the callback 3286 * will be executed. The attached skb (if present) will only be freed 3287 * if the callback returns 1. 3288 */ 3289 void 3290 il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) 3291 { 3292 struct il_rx_pkt *pkt = rxb_addr(rxb); 3293 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 3294 int txq_id = SEQ_TO_QUEUE(sequence); 3295 int idx = SEQ_TO_IDX(sequence); 3296 int cmd_idx; 3297 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 3298 struct il_device_cmd *cmd; 3299 struct il_cmd_meta *meta; 3300 struct il_tx_queue *txq = &il->txq[il->cmd_queue]; 3301 unsigned long flags; 3302 3303 /* If a Tx command is being handled and it isn't in the actual 3304 * command queue, then a command routing bug has been introduced 3305 * in the queue management code. */ 3306 if (WARN 3307 (txq_id != il->cmd_queue, 3308 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 3309 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, 3310 il->txq[il->cmd_queue].q.write_ptr)) { 3311 il_print_hex_error(il, pkt, 32); 3312 return; 3313 } 3314 3315 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge); 3316 cmd = txq->cmd[cmd_idx]; 3317 meta = &txq->meta[cmd_idx]; 3318 3319 txq->time_stamp = jiffies; 3320 3321 pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), 3322 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); 3323 3324 /* Input error checking is done when commands are added to queue. */ 3325 if (meta->flags & CMD_WANT_SKB) { 3326 meta->source->reply_page = (unsigned long)rxb_addr(rxb); 3327 rxb->page = NULL; 3328 } else if (meta->callback) 3329 meta->callback(il, cmd, pkt); 3330 3331 spin_lock_irqsave(&il->hcmd_lock, flags); 3332 3333 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx); 3334 3335 if (!(meta->flags & CMD_ASYNC)) { 3336 clear_bit(S_HCMD_ACTIVE, &il->status); 3337 D_INFO("Clearing HCMD_ACTIVE for command %s\n", 3338 il_get_cmd_string(cmd->hdr.cmd)); 3339 wake_up(&il->wait_command_queue); 3340 } 3341 3342 /* Mark as unmapped */ 3343 meta->flags = 0; 3344 3345 spin_unlock_irqrestore(&il->hcmd_lock, flags); 3346 } 3347 EXPORT_SYMBOL(il_tx_cmd_complete); 3348 3349 MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); 3350 MODULE_VERSION(IWLWIFI_VERSION); 3351 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 3352 MODULE_LICENSE("GPL");
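/*
 * Editor's note: bt_coex_active below is a read-only module parameter. A
 * hedged usage sketch -- the exact module name to pass to modprobe depends
 * on the hardware (e.g. iwl3945 vs iwl4965), so treat this line as
 * illustrative:
 *
 *   modprobe iwl4965 bt_coex_active=0
 *
 * In code, a hypothetical consumer would simply consult the flag when
 * building the C_BT_CONFIG command (BT_COEX_DISABLE is assumed here):
 */
#if 0
bt_cmd.flags = bt_coex_active ? BT_COEX_ENABLE : BT_COEX_DISABLE;
#endif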
3354 /* 3355 * If bt_coex_active is set to true, the uCode will do kill/defer every 3356 * time the priority line is asserted (BT is sending signals on the 3357 * priority line in the PCIx). 3358 * If bt_coex_active is set to false, the uCode will ignore the BT activity 3359 * and perform the normal operation. 3360 * 3361 * Users might experience transmit issues on some platforms due to this 3362 * WiFi/BT co-existence problem. The possible behavior is being able to scan 3363 * and find all the available APs, 3364 * but not being able to associate with any AP. 3365 * On those platforms, WiFi communication can be restored by setting the 3366 * "bt_coex_active" module parameter to "false". 3367 * 3368 * default: bt_coex_active = true (BT_COEX_ENABLE) 3369 */ 3370 static bool bt_coex_active = true; 3371 module_param(bt_coex_active, bool, S_IRUGO); 3372 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 3373 3374 u32 il_debug_level; 3375 EXPORT_SYMBOL(il_debug_level); 3376 3377 const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3378 EXPORT_SYMBOL(il_bcast_addr); 3379 3380 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 3381 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 3382 static void 3383 il_init_ht_hw_capab(const struct il_priv *il, 3384 struct ieee80211_sta_ht_cap *ht_info, 3385 enum nl80211_band band) 3386 { 3387 u16 max_bit_rate = 0; 3388 u8 rx_chains_num = il->hw_params.rx_chains_num; 3389 u8 tx_chains_num = il->hw_params.tx_chains_num; 3390 3391 ht_info->cap = 0; 3392 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 3393 3394 ht_info->ht_supported = true; 3395 3396 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 3397 max_bit_rate = MAX_BIT_RATE_20_MHZ; 3398 if (il->hw_params.ht40_channel & BIT(band)) { 3399 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 3400 ht_info->cap |= IEEE80211_HT_CAP_SGI_40; 3401 ht_info->mcs.rx_mask[4] = 0x01; 3402 max_bit_rate = MAX_BIT_RATE_40_MHZ; 3403 } 3404 3405 if (il->cfg->mod_params->amsdu_size_8K) 3406 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 3407 3408 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 3409 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 3410 3411 ht_info->mcs.rx_mask[0] = 0xFF; 3412 if (rx_chains_num >= 2) 3413 ht_info->mcs.rx_mask[1] = 0xFF; 3414 if (rx_chains_num >= 3) 3415 ht_info->mcs.rx_mask[2] = 0xFF; 3416 3417 /* Highest supported Rx data rate */ 3418 max_bit_rate *= rx_chains_num; 3419 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); 3420 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); 3421 3422 /* Tx MCS capabilities */ 3423 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 3424 if (tx_chains_num != rx_chains_num) { 3425 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 3426 ht_info->mcs.tx_params |= 3427 ((tx_chains_num - 3428 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 3429 } 3430 } 3431 3432 /** 3433 * il_init_geos - Initialize mac80211's geo/channel info from eeprom 3434 */ 3435 int 3436 il_init_geos(struct il_priv *il) 3437 { 3438 struct il_channel_info *ch; 3439 struct ieee80211_supported_band *sband; 3440 struct ieee80211_channel *channels; 3441 struct ieee80211_channel *geo_ch; 3442 struct ieee80211_rate *rates; 3443 int i = 0; 3444 s8 max_tx_power = 0; 3445 3446 if (il->bands[NL80211_BAND_2GHZ].n_bitrates || 3447 il->bands[NL80211_BAND_5GHZ].n_bitrates) { 3448 D_INFO("Geography modes already initialized.\n"); 3449 set_bit(S_GEO_CONFIGURED, &il->status); 3450 return 0; 3451 } 3452 3453 channels = 3454 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count, 3455 GFP_KERNEL); 3456 if (!channels) 3457 return -ENOMEM; 3458 3459 rates = 3460 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY), 3461 GFP_KERNEL); 3462 if (!rates) { 3463 kfree(channels); 3464 return -ENOMEM; 3465
} 3466 3467 /* 5.2GHz channels start after the 2.4GHz channels */ 3468 sband = &il->bands[NL80211_BAND_5GHZ]; 3469 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; 3470 /* just OFDM */ 3471 sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; 3472 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; 3473 3474 if (il->cfg->sku & IL_SKU_N) 3475 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ); 3476 3477 sband = &il->bands[NL80211_BAND_2GHZ]; 3478 sband->channels = channels; 3479 /* OFDM & CCK */ 3480 sband->bitrates = rates; 3481 sband->n_bitrates = RATE_COUNT_LEGACY; 3482 3483 if (il->cfg->sku & IL_SKU_N) 3484 il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ); 3485 3486 il->ieee_channels = channels; 3487 il->ieee_rates = rates; 3488 3489 for (i = 0; i < il->channel_count; i++) { 3490 ch = &il->channel_info[i]; 3491 3492 if (!il_is_channel_valid(ch)) 3493 continue; 3494 3495 sband = &il->bands[ch->band]; 3496 3497 geo_ch = &sband->channels[sband->n_channels++]; 3498 3499 geo_ch->center_freq = 3500 ieee80211_channel_to_frequency(ch->channel, ch->band); 3501 geo_ch->max_power = ch->max_power_avg; 3502 geo_ch->max_antenna_gain = 0xff; 3503 geo_ch->hw_value = ch->channel; 3504 3505 if (il_is_channel_valid(ch)) { 3506 if (!(ch->flags & EEPROM_CHANNEL_IBSS)) 3507 geo_ch->flags |= IEEE80211_CHAN_NO_IR; 3508 3509 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) 3510 geo_ch->flags |= IEEE80211_CHAN_NO_IR; 3511 3512 if (ch->flags & EEPROM_CHANNEL_RADAR) 3513 geo_ch->flags |= IEEE80211_CHAN_RADAR; 3514 3515 geo_ch->flags |= ch->ht40_extension_channel; 3516 3517 if (ch->max_power_avg > max_tx_power) 3518 max_tx_power = ch->max_power_avg; 3519 } else { 3520 geo_ch->flags |= IEEE80211_CHAN_DISABLED; 3521 } 3522 3523 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel, 3524 geo_ch->center_freq, 3525 il_is_channel_a_band(ch) ? "5.2" : "2.4", 3526 geo_ch-> 3527 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid", 3528 geo_ch->flags); 3529 } 3530 3531 il->tx_power_device_lmt = max_tx_power; 3532 il->tx_power_user_lmt = max_tx_power; 3533 il->tx_power_next = max_tx_power; 3534 3535 if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 && 3536 (il->cfg->sku & IL_SKU_A)) { 3537 IL_INFO("Incorrectly detected BG card as ABG. 
" 3538 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", 3539 il->pci_dev->device, il->pci_dev->subsystem_device); 3540 il->cfg->sku &= ~IL_SKU_A; 3541 } 3542 3543 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n", 3544 il->bands[NL80211_BAND_2GHZ].n_channels, 3545 il->bands[NL80211_BAND_5GHZ].n_channels); 3546 3547 set_bit(S_GEO_CONFIGURED, &il->status); 3548 3549 return 0; 3550 } 3551 EXPORT_SYMBOL(il_init_geos); 3552 3553 /* 3554 * il_free_geos - undo allocations in il_init_geos 3555 */ 3556 void 3557 il_free_geos(struct il_priv *il) 3558 { 3559 kfree(il->ieee_channels); 3560 kfree(il->ieee_rates); 3561 clear_bit(S_GEO_CONFIGURED, &il->status); 3562 } 3563 EXPORT_SYMBOL(il_free_geos); 3564 3565 static bool 3566 il_is_channel_extension(struct il_priv *il, enum nl80211_band band, 3567 u16 channel, u8 extension_chan_offset) 3568 { 3569 const struct il_channel_info *ch_info; 3570 3571 ch_info = il_get_channel_info(il, band, channel); 3572 if (!il_is_channel_valid(ch_info)) 3573 return false; 3574 3575 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) 3576 return !(ch_info-> 3577 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS); 3578 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) 3579 return !(ch_info-> 3580 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS); 3581 3582 return false; 3583 } 3584 3585 bool 3586 il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap) 3587 { 3588 if (!il->ht.enabled || !il->ht.is_40mhz) 3589 return false; 3590 3591 /* 3592 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 3593 * the bit will not set if it is pure 40MHz case 3594 */ 3595 if (ht_cap && !ht_cap->ht_supported) 3596 return false; 3597 3598 #ifdef CONFIG_IWLEGACY_DEBUGFS 3599 if (il->disable_ht40) 3600 return false; 3601 #endif 3602 3603 return il_is_channel_extension(il, il->band, 3604 le16_to_cpu(il->staging.channel), 3605 il->ht.extension_chan_offset); 3606 } 3607 EXPORT_SYMBOL(il_is_ht40_tx_allowed); 3608 3609 static u16 noinline 3610 il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 3611 { 3612 u16 new_val; 3613 u16 beacon_factor; 3614 3615 /* 3616 * If mac80211 hasn't given us a beacon interval, program 3617 * the default into the device. 3618 */ 3619 if (!beacon_val) 3620 return DEFAULT_BEACON_INTERVAL; 3621 3622 /* 3623 * If the beacon interval we obtained from the peer 3624 * is too large, we'll have to wake up more often 3625 * (and in IBSS case, we'll beacon too much) 3626 * 3627 * For example, if max_beacon_val is 4096, and the 3628 * requested beacon interval is 7000, we'll have to 3629 * use 3500 to be able to wake up on the beacons. 3630 * 3631 * This could badly influence beacon detection stats. 3632 */ 3633 3634 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; 3635 new_val = beacon_val / beacon_factor; 3636 3637 if (!new_val) 3638 new_val = max_beacon_val; 3639 3640 return new_val; 3641 } 3642 3643 int 3644 il_send_rxon_timing(struct il_priv *il) 3645 { 3646 u64 tsf; 3647 s32 interval_tm, rem; 3648 struct ieee80211_conf *conf = NULL; 3649 u16 beacon_int; 3650 struct ieee80211_vif *vif = il->vif; 3651 3652 conf = &il->hw->conf; 3653 3654 lockdep_assert_held(&il->mutex); 3655 3656 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd)); 3657 3658 il->timing.timestamp = cpu_to_le64(il->timestamp); 3659 il->timing.listen_interval = cpu_to_le16(conf->listen_interval); 3660 3661 beacon_int = vif ? 
3643 int 3644 il_send_rxon_timing(struct il_priv *il) 3645 { 3646 u64 tsf; 3647 s32 interval_tm, rem; 3648 struct ieee80211_conf *conf = NULL; 3649 u16 beacon_int; 3650 struct ieee80211_vif *vif = il->vif; 3651 3652 conf = &il->hw->conf; 3653 3654 lockdep_assert_held(&il->mutex); 3655 3656 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd)); 3657 3658 il->timing.timestamp = cpu_to_le64(il->timestamp); 3659 il->timing.listen_interval = cpu_to_le16(conf->listen_interval); 3660 3661 beacon_int = vif ? vif->bss_conf.beacon_int : 0; 3662 3663 /* 3664 * TODO: For IBSS we need to get atim_win from mac80211, 3665 * for now just always use 0 3666 */ 3667 il->timing.atim_win = 0; 3668 3669 beacon_int = 3670 il_adjust_beacon_interval(beacon_int, 3671 il->hw_params.max_beacon_itrvl * 3672 TIME_UNIT); 3673 il->timing.beacon_interval = cpu_to_le16(beacon_int); 3674 3675 tsf = il->timestamp; /* tsf is modified by do_div: copy it */ 3676 interval_tm = beacon_int * TIME_UNIT; 3677 rem = do_div(tsf, interval_tm); 3678 il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); 3679 3680 il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1; 3681 3682 D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n", 3683 le16_to_cpu(il->timing.beacon_interval), 3684 le32_to_cpu(il->timing.beacon_init_val), 3685 le16_to_cpu(il->timing.atim_win)); 3686 3687 return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing), 3688 &il->timing); 3689 } 3690 EXPORT_SYMBOL(il_send_rxon_timing); 3691 3692 void 3693 il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt) 3694 { 3695 struct il_rxon_cmd *rxon = &il->staging; 3696 3697 if (hw_decrypt) 3698 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 3699 else 3700 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; 3701 3702 } 3703 EXPORT_SYMBOL(il_set_rxon_hwcrypto); 3704 3705 /* check that the RXON structure is valid */ 3706 int 3707 il_check_rxon_cmd(struct il_priv *il) 3708 { 3709 struct il_rxon_cmd *rxon = &il->staging; 3710 bool error = false; 3711 3712 if (rxon->flags & RXON_FLG_BAND_24G_MSK) { 3713 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { 3714 IL_WARN("check 2.4G: wrong narrow\n"); 3715 error = true; 3716 } 3717 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { 3718 IL_WARN("check 2.4G: wrong radar\n"); 3719 error = true; 3720 } 3721 } else { 3722 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { 3723 IL_WARN("check 5.2G: not short slot!\n"); 3724 error = true; 3725 } 3726 if (rxon->flags & RXON_FLG_CCK_MSK) { 3727 IL_WARN("check 5.2G: CCK!\n"); 3728 error = true; 3729 } 3730 } 3731 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { 3732 IL_WARN("mac/bssid mcast!\n"); 3733 error = true; 3734 } 3735 3736 /* make sure basic rates 6Mbps and 1Mbps are supported */ 3737 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 && 3738 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) { 3739 IL_WARN("neither 1 nor 6 are basic\n"); 3740 error = true; 3741 } 3742 3743 if (le16_to_cpu(rxon->assoc_id) > 2007) { 3744 IL_WARN("aid > 2007\n"); 3745 error = true; 3746 } 3747 3748 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == 3749 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { 3750 IL_WARN("CCK and short slot\n"); 3751 error = true; 3752 } 3753 3754 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == 3755 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { 3756 IL_WARN("CCK and auto detect\n"); 3757 error = true; 3758 } 3759 3760 if ((rxon-> 3761 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) == 3762 RXON_FLG_TGG_PROTECT_MSK) { 3763 IL_WARN("TGg but no auto-detect\n"); 3764 error = true; 3765 } 3766 3767 if (error) 3768 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel)); 3769 3770 if (error) { 3771 IL_ERR("Invalid RXON\n"); 3772 return -EINVAL; 3773 } 3774 return 0; 3775 } 3776 EXPORT_SYMBOL(il_check_rxon_cmd); 3777 3778 /** 3779 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 3780 * @il: staging_rxon is compared to active_rxon 3781 * 3782 * If the RXON structure is changing enough
to require a new tune, 3783 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 3784 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 3785 */ 3786 int 3787 il_full_rxon_required(struct il_priv *il) 3788 { 3789 const struct il_rxon_cmd *staging = &il->staging; 3790 const struct il_rxon_cmd *active = &il->active; 3791 3792 #define CHK(cond) \ 3793 if ((cond)) { \ 3794 D_INFO("need full RXON - " #cond "\n"); \ 3795 return 1; \ 3796 } 3797 3798 #define CHK_NEQ(c1, c2) \ 3799 if ((c1) != (c2)) { \ 3800 D_INFO("need full RXON - " \ 3801 #c1 " != " #c2 " - %d != %d\n", \ 3802 (c1), (c2)); \ 3803 return 1; \ 3804 } 3805 3806 /* These items are only settable from the full RXON command */ 3807 CHK(!il_is_associated(il)); 3808 CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr)); 3809 CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr)); 3810 CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr, 3811 active->wlap_bssid_addr)); 3812 CHK_NEQ(staging->dev_type, active->dev_type); 3813 CHK_NEQ(staging->channel, active->channel); 3814 CHK_NEQ(staging->air_propagation, active->air_propagation); 3815 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, 3816 active->ofdm_ht_single_stream_basic_rates); 3817 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, 3818 active->ofdm_ht_dual_stream_basic_rates); 3819 CHK_NEQ(staging->assoc_id, active->assoc_id); 3820 3821 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can 3822 * be updated with the RXON_ASSOC command -- however only some 3823 * flag transitions are allowed using RXON_ASSOC */ 3824 3825 /* Check if we are not switching bands */ 3826 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, 3827 active->flags & RXON_FLG_BAND_24G_MSK); 3828 3829 /* Check if we are switching association toggle */ 3830 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, 3831 active->filter_flags & RXON_FILTER_ASSOC_MSK); 3832 3833 #undef CHK 3834 #undef CHK_NEQ 3835 3836 return 0; 3837 } 3838 EXPORT_SYMBOL(il_full_rxon_required); 3839 3840 u8 3841 il_get_lowest_plcp(struct il_priv *il) 3842 { 3843 /* 3844 * Assign the lowest rate -- should really get this from 3845 * the beacon skb from mac80211. 
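 */

/*
 * Editor's note: the CHK()/CHK_NEQ() helpers in il_full_rxon_required()
 * above are a compact way to diff two versions of a structure field by
 * field while logging which field forced the full command. The same idiom,
 * reduced to its core:
 */
#if 0
#define CHK_NEQ(c1, c2)							\
	if ((c1) != (c2)) {						\
		D_INFO("need full RXON - " #c1 " != " #c2 "\n");	\
		return 1;						\
	}

CHK_NEQ(staging->channel, active->channel);
#undef CHK_NEQ
#endif

/*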
3846 */ 3847 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) 3848 return RATE_1M_PLCP; 3849 else 3850 return RATE_6M_PLCP; 3851 } 3852 EXPORT_SYMBOL(il_get_lowest_plcp); 3853 3854 static void 3855 _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3856 { 3857 struct il_rxon_cmd *rxon = &il->staging; 3858 3859 if (!il->ht.enabled) { 3860 rxon->flags &= 3861 ~(RXON_FLG_CHANNEL_MODE_MSK | 3862 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK 3863 | RXON_FLG_HT_PROT_MSK); 3864 return; 3865 } 3866 3867 rxon->flags |= 3868 cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); 3869 3870 /* Set up channel bandwidth: 3871 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ 3872 /* clear the HT channel mode before set the mode */ 3873 rxon->flags &= 3874 ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3875 if (il_is_ht40_tx_allowed(il, NULL)) { 3876 /* pure ht40 */ 3877 if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { 3878 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; 3879 /* Note: control channel is opposite of extension channel */ 3880 switch (il->ht.extension_chan_offset) { 3881 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3882 rxon->flags &= 3883 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3884 break; 3885 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3886 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3887 break; 3888 } 3889 } else { 3890 /* Note: control channel is opposite of extension channel */ 3891 switch (il->ht.extension_chan_offset) { 3892 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 3893 rxon->flags &= 3894 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 3895 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3896 break; 3897 case IEEE80211_HT_PARAM_CHA_SEC_BELOW: 3898 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 3899 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 3900 break; 3901 case IEEE80211_HT_PARAM_CHA_SEC_NONE: 3902 default: 3903 /* channel location only valid if in Mixed mode */ 3904 IL_ERR("invalid extension channel offset\n"); 3905 break; 3906 } 3907 } 3908 } else { 3909 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; 3910 } 3911 3912 if (il->ops->set_rxon_chain) 3913 il->ops->set_rxon_chain(il); 3914 3915 D_ASSOC("rxon flags 0x%X operation mode :0x%X " 3916 "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), 3917 il->ht.protection, il->ht.extension_chan_offset); 3918 } 3919 3920 void 3921 il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) 3922 { 3923 _il_set_rxon_ht(il, ht_conf); 3924 } 3925 EXPORT_SYMBOL(il_set_rxon_ht); 3926 3927 /* Return valid, unused, channel for a passive scan to reset the RF */ 3928 u8 3929 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band) 3930 { 3931 const struct il_channel_info *ch_info; 3932 int i; 3933 u8 channel = 0; 3934 u8 min, max; 3935 3936 if (band == NL80211_BAND_5GHZ) { 3937 min = 14; 3938 max = il->channel_count; 3939 } else { 3940 min = 0; 3941 max = 14; 3942 } 3943 3944 for (i = min; i < max; i++) { 3945 channel = il->channel_info[i].channel; 3946 if (channel == le16_to_cpu(il->staging.channel)) 3947 continue; 3948 3949 ch_info = il_get_channel_info(il, band, channel); 3950 if (il_is_channel_valid(ch_info)) 3951 break; 3952 } 3953 3954 return channel; 3955 } 3956 EXPORT_SYMBOL(il_get_single_channel_number); 3957 3958 /** 3959 * il_set_rxon_channel - Set the band and channel values in staging RXON 3960 * @ch: requested channel as a pointer to struct ieee80211_channel 3961 3962 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 3963 * in the staging 
RXON flag structure based on the ch->band 3964 */ 3965 int 3966 il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch) 3967 { 3968 enum nl80211_band band = ch->band; 3969 u16 channel = ch->hw_value; 3970 3971 if (le16_to_cpu(il->staging.channel) == channel && il->band == band) 3972 return 0; 3973 3974 il->staging.channel = cpu_to_le16(channel); 3975 if (band == NL80211_BAND_5GHZ) 3976 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK; 3977 else 3978 il->staging.flags |= RXON_FLG_BAND_24G_MSK; 3979 3980 il->band = band; 3981 3982 D_INFO("Staging channel set to %d [%d]\n", channel, band); 3983 3984 return 0; 3985 } 3986 EXPORT_SYMBOL(il_set_rxon_channel); 3987 3988 void 3989 il_set_flags_for_band(struct il_priv *il, enum nl80211_band band, 3990 struct ieee80211_vif *vif) 3991 { 3992 if (band == NL80211_BAND_5GHZ) { 3993 il->staging.flags &= 3994 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | 3995 RXON_FLG_CCK_MSK); 3996 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 3997 } else { 3998 /* Copied from il_post_associate() */ 3999 if (vif && vif->bss_conf.use_short_slot) 4000 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 4001 else 4002 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 4003 4004 il->staging.flags |= RXON_FLG_BAND_24G_MSK; 4005 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; 4006 il->staging.flags &= ~RXON_FLG_CCK_MSK; 4007 } 4008 } 4009 EXPORT_SYMBOL(il_set_flags_for_band); 4010 4011 /* 4012 * initialize rxon structure with default values from eeprom 4013 */ 4014 void 4015 il_connection_init_rx_config(struct il_priv *il) 4016 { 4017 const struct il_channel_info *ch_info; 4018 4019 memset(&il->staging, 0, sizeof(il->staging)); 4020 4021 switch (il->iw_mode) { 4022 case NL80211_IFTYPE_UNSPECIFIED: 4023 il->staging.dev_type = RXON_DEV_TYPE_ESS; 4024 break; 4025 case NL80211_IFTYPE_STATION: 4026 il->staging.dev_type = RXON_DEV_TYPE_ESS; 4027 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 4028 break; 4029 case NL80211_IFTYPE_ADHOC: 4030 il->staging.dev_type = RXON_DEV_TYPE_IBSS; 4031 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 4032 il->staging.filter_flags = 4033 RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 4034 break; 4035 default: 4036 IL_ERR("Unsupported interface type %d\n", il->vif->type); 4037 return; 4038 } 4039 4040 #if 0 4041 /* TODO: Figure out when short_preamble would be set and cache from 4042 * that */ 4043 if (!hw_to_local(il->hw)->short_preamble) 4044 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 4045 else 4046 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 4047 #endif 4048 4049 ch_info = 4050 il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel)); 4051 4052 if (!ch_info) 4053 ch_info = &il->channel_info[0]; 4054 4055 il->staging.channel = cpu_to_le16(ch_info->channel); 4056 il->band = ch_info->band; 4057 4058 il_set_flags_for_band(il, il->band, il->vif); 4059 4060 il->staging.ofdm_basic_rates = 4061 (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; 4062 il->staging.cck_basic_rates = 4063 (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; 4064 4065 /* clear both MIX and PURE40 mode flag */ 4066 il->staging.flags &= 4067 ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40); 4068 if (il->vif) 4069 memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN); 4070 4071 il->staging.ofdm_ht_single_stream_basic_rates = 0xff; 4072 il->staging.ofdm_ht_dual_stream_basic_rates = 0xff; 4073 } 4074 EXPORT_SYMBOL(il_connection_init_rx_config); 4075 4076 void 4077 il_set_rate(struct il_priv *il) 4078 { 4079 const struct 
ieee80211_supported_band *hw = NULL; 4080 struct ieee80211_rate *rate; 4081 int i; 4082 4083 hw = il_get_hw_mode(il, il->band); 4084 if (!hw) { 4085 IL_ERR("Failed to set rate: unable to get hw mode\n"); 4086 return; 4087 } 4088 4089 il->active_rate = 0; 4090 4091 for (i = 0; i < hw->n_bitrates; i++) { 4092 rate = &(hw->bitrates[i]); 4093 if (rate->hw_value < RATE_COUNT_LEGACY) 4094 il->active_rate |= (1 << rate->hw_value); 4095 } 4096 4097 D_RATE("Set active_rate = %0x\n", il->active_rate); 4098 4099 il->staging.cck_basic_rates = 4100 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; 4101 4102 il->staging.ofdm_basic_rates = 4103 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; 4104 } 4105 EXPORT_SYMBOL(il_set_rate); 4106 4107 void 4108 il_chswitch_done(struct il_priv *il, bool is_success) 4109 { 4110 if (test_bit(S_EXIT_PENDING, &il->status)) 4111 return; 4112 4113 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) 4114 ieee80211_chswitch_done(il->vif, is_success); 4115 } 4116 EXPORT_SYMBOL(il_chswitch_done); 4117 4118 void 4119 il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb) 4120 { 4121 struct il_rx_pkt *pkt = rxb_addr(rxb); 4122 struct il_csa_notification *csa = &(pkt->u.csa_notif); 4123 struct il_rxon_cmd *rxon = (void *)&il->active; 4124 4125 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) 4126 return; 4127 4128 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) { 4129 rxon->channel = csa->channel; 4130 il->staging.channel = csa->channel; 4131 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel)); 4132 il_chswitch_done(il, true); 4133 } else { 4134 IL_ERR("CSA notif (fail) : channel %d\n", 4135 le16_to_cpu(csa->channel)); 4136 il_chswitch_done(il, false); 4137 } 4138 } 4139 EXPORT_SYMBOL(il_hdl_csa); 4140 4141 #ifdef CONFIG_IWLEGACY_DEBUG 4142 void 4143 il_print_rx_config_cmd(struct il_priv *il) 4144 { 4145 struct il_rxon_cmd *rxon = &il->staging; 4146 4147 D_RADIO("RX CONFIG:\n"); 4148 il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 4149 D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); 4150 D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); 4151 D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags)); 4152 D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); 4153 D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates); 4154 D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); 4155 D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr); 4156 D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 4157 D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 4158 } 4159 EXPORT_SYMBOL(il_print_rx_config_cmd); 4160 #endif 4161 /** 4162 * il_irq_handle_error - called for HW or SW error interrupt from card 4163 */ 4164 void 4165 il_irq_handle_error(struct il_priv *il) 4166 { 4167 /* Set the FW error flag -- cleared on il_down */ 4168 set_bit(S_FW_ERROR, &il->status); 4169 4170 /* Cancel currently queued command. 
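 * Clearing S_HCMD_ACTIVE lets a synchronous command waiter blocked on
 * wait_command_queue observe the abort once the queue is woken below.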
*/ 4171 clear_bit(S_HCMD_ACTIVE, &il->status); 4172 4173 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version); 4174 4175 il->ops->dump_nic_error_log(il); 4176 if (il->ops->dump_fh) 4177 il->ops->dump_fh(il, NULL, false); 4178 #ifdef CONFIG_IWLEGACY_DEBUG 4179 if (il_get_debug_level(il) & IL_DL_FW_ERRORS) 4180 il_print_rx_config_cmd(il); 4181 #endif 4182 4183 wake_up(&il->wait_command_queue); 4184 4185 /* Keep the restart process from trying to send host 4186 * commands by clearing the INIT status bit */ 4187 clear_bit(S_READY, &il->status); 4188 4189 if (!test_bit(S_EXIT_PENDING, &il->status)) { 4190 IL_DBG(IL_DL_FW_ERRORS, 4191 "Restarting adapter due to uCode error.\n"); 4192 4193 if (il->cfg->mod_params->restart_fw) 4194 queue_work(il->workqueue, &il->restart); 4195 } 4196 } 4197 EXPORT_SYMBOL(il_irq_handle_error); 4198 4199 static int 4200 _il_apm_stop_master(struct il_priv *il) 4201 { 4202 int ret = 0; 4203 4204 /* stop device's busmaster DMA activity */ 4205 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 4206 4207 ret = 4208 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, 4209 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 4210 if (ret < 0) 4211 IL_WARN("Master Disable Timed Out, 100 usec\n"); 4212 4213 D_INFO("stop master\n"); 4214 4215 return ret; 4216 } 4217 4218 void 4219 _il_apm_stop(struct il_priv *il) 4220 { 4221 lockdep_assert_held(&il->reg_lock); 4222 4223 D_INFO("Stop card, put in low power state\n"); 4224 4225 /* Stop device's DMA activity */ 4226 _il_apm_stop_master(il); 4227 4228 /* Reset the entire device */ 4229 _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 4230 4231 udelay(10); 4232 4233 /* 4234 * Clear "initialization complete" bit to move adapter from 4235 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 4236 */ 4237 _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 4238 } 4239 EXPORT_SYMBOL(_il_apm_stop); 4240 4241 void 4242 il_apm_stop(struct il_priv *il) 4243 { 4244 unsigned long flags; 4245 4246 spin_lock_irqsave(&il->reg_lock, flags); 4247 _il_apm_stop(il); 4248 spin_unlock_irqrestore(&il->reg_lock, flags); 4249 } 4250 EXPORT_SYMBOL(il_apm_stop); 4251 4252 /* 4253 * Start up NIC's basic functionality after it has been reset 4254 * (e.g. after platform boot, or shutdown via il_apm_stop()) 4255 * NOTE: This does not load uCode nor start the embedded processor 4256 */ 4257 int 4258 il_apm_init(struct il_priv *il) 4259 { 4260 int ret = 0; 4261 u16 lctl; 4262 4263 D_INFO("Init card's basic functions\n"); 4264 4265 /* 4266 * Use "set_bit" below rather than "write", to preserve any hardware 4267 * bits already set by default after reset. 4268 */ 4269 4270 /* Disable L0S exit timer (platform NMI Work/Around) */ 4271 il_set_bit(il, CSR_GIO_CHICKEN_BITS, 4272 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 4273 4274 /* 4275 * Disable L0s without affecting L1; 4276 * don't wait for ICH L0s (ICH bug W/A) 4277 */ 4278 il_set_bit(il, CSR_GIO_CHICKEN_BITS, 4279 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 4280 4281 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 4282 il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 4283 4284 /* 4285 * Enable HAP INTA (interrupt from management bus) to 4286 * wake device's PCI Express link L1a -> L0s 4287 * NOTE: This is no-op for 3945 (non-existent bit) 4288 */ 4289 il_set_bit(il, CSR_HW_IF_CONFIG_REG, 4290 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 4291 4292 /* 4293 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. 
4294  * Check if BIOS (or OS) enabled L1-ASPM on this device.
4295  * If so (likely), disable L0S, so the device moves directly L0->L1;
4296  * this costs only a negligible amount of power savings.
4297  * If not (unlikely), enable L0S, so there is at least some
4298  * power savings, even without L1.
4299  */
4300     if (il->cfg->set_l0s) {
4301         pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
4302         if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
4303             /* L1-ASPM enabled; disable(!) L0S */
4304             il_set_bit(il, CSR_GIO_REG,
4305                    CSR_GIO_REG_VAL_L0S_ENABLED);
4306             D_POWER("L1 Enabled; Disabling L0S\n");
4307         } else {
4308             /* L1-ASPM disabled; enable(!) L0S */
4309             il_clear_bit(il, CSR_GIO_REG,
4310                      CSR_GIO_REG_VAL_L0S_ENABLED);
4311             D_POWER("L1 Disabled; Enabling L0S\n");
4312         }
4313     }
4314 
4315     /* Configure analog phase-lock-loop before activating to D0A */
4316     if (il->cfg->pll_cfg_val)
4317         il_set_bit(il, CSR_ANA_PLL_CFG,
4318                il->cfg->pll_cfg_val);
4319 
4320     /*
4321      * Set "initialization complete" bit to move adapter from
4322      * D0U* --> D0A* (powered-up active) state.
4323      */
4324     il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4325 
4326     /*
4327      * Wait for clock stabilization; once stabilized, access to
4328      * device-internal resources is supported, e.g. il_wr_prph()
4329      * and accesses to uCode SRAM.
4330      */
4331     ret =
4332         _il_poll_bit(il, CSR_GP_CNTRL,
4333              CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
4334              CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
4335     if (ret < 0) {
4336         D_INFO("Failed to init the card\n");
4337         goto out;
4338     }
4339 
4340     /*
4341      * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
4342      * BSM (Bootstrap State Machine) is only in 3945 and 4965.
4343      *
4344      * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
4345      * do not disable clocks. This preserves any hardware bits already
4346      * set by default in "CLK_CTRL_REG" after reset.
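 *
 * Illustrative example: if the BSM clock request bit is already set
 * in "CLK_CTRL_REG" after reset, writing only APMG_CLK_VAL_DMA_CLK_RQT
 * requests the DMA clock while leaving that BSM request bit untouched.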
4347      */
4348     if (il->cfg->use_bsm)
4349         il_wr_prph(il, APMG_CLK_EN_REG,
4350                APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
4351     else
4352         il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
4353     udelay(20);
4354 
4355     /* Disable L1-Active */
4356     il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
4357              APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
4358 
4359 out:
4360     return ret;
4361 }
4362 EXPORT_SYMBOL(il_apm_init);
4363 
4364 int
4365 il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
4366 {
4367     int ret;
4368     s8 prev_tx_power;
4369     bool defer;
4370 
4371     lockdep_assert_held(&il->mutex);
4372 
4373     if (il->tx_power_user_lmt == tx_power && !force)
4374         return 0;
4375 
4376     if (!il->ops->send_tx_power)
4377         return -EOPNOTSUPP;
4378 
4379     /* 0 dBm means 1 milliwatt */
4380     if (tx_power < 0) {
4381         IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
4382         return -EINVAL;
4383     }
4384 
4385     if (tx_power > il->tx_power_device_lmt) {
4386         IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
4387             tx_power, il->tx_power_device_lmt);
4388         return -EINVAL;
4389     }
4390 
4391     if (!il_is_ready_rf(il))
4392         return -EIO;
4393 
4394     /* both scan completion and commit_rxon use the tx_power_next value;
4395      * it always needs to be updated with the newest request */
4396     il->tx_power_next = tx_power;
4397 
4398     /* do not set tx power when scanning or channel changing */
4399     defer = test_bit(S_SCANNING, &il->status) ||
4400         memcmp(&il->active, &il->staging, sizeof(il->staging));
4401     if (defer && !force) {
4402         D_INFO("Deferring tx power set\n");
4403         return 0;
4404     }
4405 
4406     prev_tx_power = il->tx_power_user_lmt;
4407     il->tx_power_user_lmt = tx_power;
4408 
4409     ret = il->ops->send_tx_power(il);
4410 
4411     /* if we failed to set tx_power, restore the original tx power */
4412     if (ret) {
4413         il->tx_power_user_lmt = prev_tx_power;
4414         il->tx_power_next = prev_tx_power;
4415     }
4416     return ret;
4417 }
4418 EXPORT_SYMBOL(il_set_tx_power);
4419 
4420 void
4421 il_send_bt_config(struct il_priv *il)
4422 {
4423     struct il_bt_cmd bt_cmd = {
4424         .lead_time = BT_LEAD_TIME_DEF,
4425         .max_kill = BT_MAX_KILL_DEF,
4426         .kill_ack_mask = 0,
4427         .kill_cts_mask = 0,
4428     };
4429 
4430     if (!bt_coex_active)
4431         bt_cmd.flags = BT_COEX_DISABLE;
4432     else
4433         bt_cmd.flags = BT_COEX_ENABLE;
4434 
4435     D_INFO("BT coex %s\n",
4436            (bt_cmd.flags == BT_COEX_DISABLE) ? "disabled" : "active");
4437 
4438     if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4439         IL_ERR("failed to send BT Coex Config\n");
4440 }
4441 EXPORT_SYMBOL(il_send_bt_config);
4442 
4443 int
4444 il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4445 {
4446     struct il_stats_cmd stats_cmd = {
4447         .configuration_flags = clear ?
IL_STATS_CONF_CLEAR_STATS : 0, 4448 }; 4449 4450 if (flags & CMD_ASYNC) 4451 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd), 4452 &stats_cmd, NULL); 4453 else 4454 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd), 4455 &stats_cmd); 4456 } 4457 EXPORT_SYMBOL(il_send_stats_request); 4458 4459 void 4460 il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb) 4461 { 4462 #ifdef CONFIG_IWLEGACY_DEBUG 4463 struct il_rx_pkt *pkt = rxb_addr(rxb); 4464 struct il_sleep_notification *sleep = &(pkt->u.sleep_notif); 4465 D_RX("sleep mode: %d, src: %d\n", 4466 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 4467 #endif 4468 } 4469 EXPORT_SYMBOL(il_hdl_pm_sleep); 4470 4471 void 4472 il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb) 4473 { 4474 struct il_rx_pkt *pkt = rxb_addr(rxb); 4475 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; 4476 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len, 4477 il_get_cmd_string(pkt->hdr.cmd)); 4478 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len); 4479 } 4480 EXPORT_SYMBOL(il_hdl_pm_debug_stats); 4481 4482 void 4483 il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb) 4484 { 4485 struct il_rx_pkt *pkt = rxb_addr(rxb); 4486 4487 IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) " 4488 "seq 0x%04X ser 0x%08X\n", 4489 le32_to_cpu(pkt->u.err_resp.error_type), 4490 il_get_cmd_string(pkt->u.err_resp.cmd_id), 4491 pkt->u.err_resp.cmd_id, 4492 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), 4493 le32_to_cpu(pkt->u.err_resp.error_info)); 4494 } 4495 EXPORT_SYMBOL(il_hdl_error); 4496 4497 void 4498 il_clear_isr_stats(struct il_priv *il) 4499 { 4500 memset(&il->isr_stats, 0, sizeof(il->isr_stats)); 4501 } 4502 4503 int 4504 il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, 4505 const struct ieee80211_tx_queue_params *params) 4506 { 4507 struct il_priv *il = hw->priv; 4508 unsigned long flags; 4509 int q; 4510 4511 D_MAC80211("enter\n"); 4512 4513 if (!il_is_ready_rf(il)) { 4514 D_MAC80211("leave - RF not ready\n"); 4515 return -EIO; 4516 } 4517 4518 if (queue >= AC_NUM) { 4519 D_MAC80211("leave - queue >= AC_NUM %d\n", queue); 4520 return 0; 4521 } 4522 4523 q = AC_NUM - 1 - queue; 4524 4525 spin_lock_irqsave(&il->lock, flags); 4526 4527 il->qos_data.def_qos_parm.ac[q].cw_min = 4528 cpu_to_le16(params->cw_min); 4529 il->qos_data.def_qos_parm.ac[q].cw_max = 4530 cpu_to_le16(params->cw_max); 4531 il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; 4532 il->qos_data.def_qos_parm.ac[q].edca_txop = 4533 cpu_to_le16((params->txop * 32)); 4534 4535 il->qos_data.def_qos_parm.ac[q].reserved1 = 0; 4536 4537 spin_unlock_irqrestore(&il->lock, flags); 4538 4539 D_MAC80211("leave\n"); 4540 return 0; 4541 } 4542 EXPORT_SYMBOL(il_mac_conf_tx); 4543 4544 int 4545 il_mac_tx_last_beacon(struct ieee80211_hw *hw) 4546 { 4547 struct il_priv *il = hw->priv; 4548 int ret; 4549 4550 D_MAC80211("enter\n"); 4551 4552 ret = (il->ibss_manager == IL_IBSS_MANAGER); 4553 4554 D_MAC80211("leave ret %d\n", ret); 4555 return ret; 4556 } 4557 EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon); 4558 4559 static int 4560 il_set_mode(struct il_priv *il) 4561 { 4562 il_connection_init_rx_config(il); 4563 4564 if (il->ops->set_rxon_chain) 4565 il->ops->set_rxon_chain(il); 4566 4567 return il_commit_rxon(il); 4568 } 4569 4570 int 4571 il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 4572 { 4573 struct il_priv *il = hw->priv; 4574 int err; 4575 bool reset; 4576 4577 mutex_lock(&il->mutex); 
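    /* il->vif and il->iw_mode are only modified with il->mutex held,
     * both here and in the remove/change-interface paths below. */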
4578     D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4579 
4580     if (!il_is_ready_rf(il)) {
4581         IL_WARN("Trying to add interface when device is not ready\n");
4582         err = -EINVAL;
4583         goto out;
4584     }
4585 
4586     /*
4587      * We do not support multiple virtual interfaces, but on hardware reset
4588      * we have to add the same interface again.
4589      */
4590     reset = (il->vif == vif);
4591     if (il->vif && !reset) {
4592         err = -EOPNOTSUPP;
4593         goto out;
4594     }
4595 
4596     il->vif = vif;
4597     il->iw_mode = vif->type;
4598 
4599     err = il_set_mode(il);
4600     if (err) {
4601         IL_WARN("Failed to set mode %d\n", vif->type);
4602         if (!reset) {
4603             il->vif = NULL;
4604             il->iw_mode = NL80211_IFTYPE_STATION;
4605         }
4606     }
4607 
4608 out:
4609     D_MAC80211("leave err %d\n", err);
4610     mutex_unlock(&il->mutex);
4611 
4612     return err;
4613 }
4614 EXPORT_SYMBOL(il_mac_add_interface);
4615 
4616 static void
4617 il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
4618 {
4619     lockdep_assert_held(&il->mutex);
4620 
4621     if (il->scan_vif == vif) {
4622         il_scan_cancel_timeout(il, 200);
4623         il_force_scan_end(il);
4624     }
4625 
4626     il_set_mode(il);
4627 }
4628 
4629 void
4630 il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4631 {
4632     struct il_priv *il = hw->priv;
4633 
4634     mutex_lock(&il->mutex);
4635     D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4636 
4637     WARN_ON(il->vif != vif);
4638     il->vif = NULL;
4639     il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
4640     il_teardown_interface(il, vif);
4641     eth_zero_addr(il->bssid);
4642 
4643     D_MAC80211("leave\n");
4644     mutex_unlock(&il->mutex);
4645 }
4646 EXPORT_SYMBOL(il_mac_remove_interface);
4647 
4648 int
4649 il_alloc_txq_mem(struct il_priv *il)
4650 {
4651     if (!il->txq)
4652         il->txq =
4653             kzalloc(sizeof(struct il_tx_queue) *
4654                 il->cfg->num_of_queues, GFP_KERNEL);
4655     if (!il->txq) {
4656         IL_ERR("Not enough memory for txq\n");
4657         return -ENOMEM;
4658     }
4659     return 0;
4660 }
4661 EXPORT_SYMBOL(il_alloc_txq_mem);
4662 
4663 void
4664 il_free_txq_mem(struct il_priv *il)
4665 {
4666     kfree(il->txq);
4667     il->txq = NULL;
4668 }
4669 EXPORT_SYMBOL(il_free_txq_mem);
4670 
4671 int
4672 il_force_reset(struct il_priv *il, bool external)
4673 {
4674     struct il_force_reset *force_reset;
4675 
4676     if (test_bit(S_EXIT_PENDING, &il->status))
4677         return -EINVAL;
4678 
4679     force_reset = &il->force_reset;
4680     force_reset->reset_request_count++;
4681     if (!external) {
4682         if (force_reset->last_force_reset_jiffies &&
4683             time_after(force_reset->last_force_reset_jiffies +
4684                    force_reset->reset_duration, jiffies)) {
4685             D_INFO("force reset rejected\n");
4686             force_reset->reset_reject_count++;
4687             return -EAGAIN;
4688         }
4689     }
4690     force_reset->reset_success_count++;
4691     force_reset->last_force_reset_jiffies = jiffies;
4692 
4693     /*
4694      * If the request is external (e.g. from debugfs), always
4695      * perform it, regardless of the module parameter setting.
4696      *
4697      * If the request is internal (uCode error or driver-detected
4698      * failure), the fw_restart module parameter needs to be
4699      * checked before performing the firmware reload.
4700      */
4701 
4702     if (!external && !il->cfg->mod_params->restart_fw) {
4703         D_INFO("Cancel firmware reload based on "
4704                "module parameter setting\n");
4705         return 0;
4706     }
4707 
4708     IL_ERR("On demand firmware reload\n");
4709 
4710     /* Set the FW error flag -- cleared on il_down */
4711     set_bit(S_FW_ERROR, &il->status);
4712     wake_up(&il->wait_command_queue);
4713     /*
4714      * Keep the restart process from trying to send host
4715      * commands by clearing the INIT status bit
4716      */
4717     clear_bit(S_READY, &il->status);
4718     queue_work(il->workqueue, &il->restart);
4719 
4720     return 0;
4721 }
4722 EXPORT_SYMBOL(il_force_reset);
4723 
4724 int
4725 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4726             enum nl80211_iftype newtype, bool newp2p)
4727 {
4728     struct il_priv *il = hw->priv;
4729     int err;
4730 
4731     mutex_lock(&il->mutex);
4732     D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
4733            vif->type, vif->addr, newtype, newp2p);
4734 
4735     if (newp2p) {
4736         err = -EOPNOTSUPP;
4737         goto out;
4738     }
4739 
4740     if (!il->vif || !il_is_ready_rf(il)) {
4741         /*
4742          * Huh? But wait ... this can happen when
4743          * we're in the middle of a firmware restart!
4744          */
4745         err = -EBUSY;
4746         goto out;
4747     }
4748 
4749     /* success */
4750     vif->type = newtype;
4751     vif->p2p = false;
4752     il->iw_mode = newtype;
4753     il_teardown_interface(il, vif);
4754     err = 0;
4755 
4756 out:
4757     D_MAC80211("leave err %d\n", err);
4758     mutex_unlock(&il->mutex);
4759 
4760     return err;
4761 }
4762 EXPORT_SYMBOL(il_mac_change_interface);
4763 
4764 void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4765           u32 queues, bool drop)
4766 {
4767     struct il_priv *il = hw->priv;
4768     unsigned long timeout = jiffies + msecs_to_jiffies(500);
4769     int i;
4770 
4771     mutex_lock(&il->mutex);
4772     D_MAC80211("enter\n");
4773 
4774     if (il->txq == NULL)
4775         goto out;
4776 
4777     for (i = 0; i < il->hw_params.max_txq_num; i++) {
4778         struct il_queue *q;
4779 
4780         if (i == il->cmd_queue)
4781             continue;
4782 
4783         q = &il->txq[i].q;
4784         if (q->read_ptr == q->write_ptr)
4785             continue;
4786 
4787         if (time_after(jiffies, timeout)) {
4788             IL_ERR("Failed to flush queue %d\n", q->id);
4789             break;
4790         }
4791 
4792         msleep(20);
4793     }
4794 out:
4795     D_MAC80211("leave\n");
4796     mutex_unlock(&il->mutex);
4797 }
4798 EXPORT_SYMBOL(il_mac_flush);
4799 
4800 /*
4801  * On every watchdog tick we check the (latest) time stamp. If it does not
4802  * change during the timeout period and the queue is not empty, we reset
4803  */
4804 static int
4805 il_check_stuck_queue(struct il_priv *il, int cnt)
4806 {
4807     struct il_tx_queue *txq = &il->txq[cnt];
4808     struct il_queue *q = &txq->q;
4809     unsigned long timeout;
4810     unsigned long now = jiffies;
4811     int ret;
4812 
4813     if (q->read_ptr == q->write_ptr) {
4814         txq->time_stamp = now;
4815         return 0;
4816     }
4817 
4818     timeout =
4819         txq->time_stamp +
4820         msecs_to_jiffies(il->cfg->wd_timeout);
4821 
4822     if (time_after(now, timeout)) {
4823         IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4824                jiffies_to_msecs(now - txq->time_stamp));
4825         ret = il_force_reset(il, false);
4826         return (ret == -EAGAIN) ? 0 : 1;
4827     }
4828 
4829     return 0;
4830 }
4831 
4832 /*
4833  * Making the watchdog tick one quarter of the timeout ensures we will
4834  * discover a hung queue between timeout and 1.25 * timeout
4835  */
4836 #define IL_WD_TICK(timeout) ((timeout) / 4)
4837 
4838 /*
4839  * Watchdog timer callback: we check each tx queue and, if it is hung,
4840  * we reset the firmware. If everything is fine we just rearm the timer.
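 *
 * Worked example (the 2000 ms figure is purely illustrative, not a
 * driver default): with wd_timeout = 2000 ms, IL_WD_TICK() yields a
 * 500 ms tick, so a queue that hangs right after a check is declared
 * stuck between 2000 ms and 2500 ms after its last time-stamp update.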
4841  */
4842 void
4843 il_bg_watchdog(unsigned long data)
4844 {
4845     struct il_priv *il = (struct il_priv *)data;
4846     int cnt;
4847     unsigned long timeout;
4848 
4849     if (test_bit(S_EXIT_PENDING, &il->status))
4850         return;
4851 
4852     timeout = il->cfg->wd_timeout;
4853     if (timeout == 0)
4854         return;
4855 
4856     /* monitor and check for stuck cmd queue */
4857     if (il_check_stuck_queue(il, il->cmd_queue))
4858         return;
4859 
4860     /* monitor and check for other stuck queues */
4861     for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4862         /* skip as we already checked the command queue */
4863         if (cnt == il->cmd_queue)
4864             continue;
4865         if (il_check_stuck_queue(il, cnt))
4866             return;
4867     }
4868 
4869     mod_timer(&il->watchdog,
4870           jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4871 }
4872 EXPORT_SYMBOL(il_bg_watchdog);
4873 
4874 void
4875 il_setup_watchdog(struct il_priv *il)
4876 {
4877     unsigned int timeout = il->cfg->wd_timeout;
4878 
4879     if (timeout)
4880         mod_timer(&il->watchdog,
4881               jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4882     else
4883         del_timer(&il->watchdog);
4884 }
4885 EXPORT_SYMBOL(il_setup_watchdog);
4886 
4887 /*
4888  * Extended beacon time format:
4889  * a time in usec is converted into a 32-bit value in extended:internal format;
4890  * the extended (high) part is the beacon count, and the internal (low,
4891  * beacon_time_tsf_bits wide) part is the time in usec within one beacon interval.
4892  */
4893 u32
4894 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4895 {
4896     u32 quot;
4897     u32 rem;
4898     u32 interval = beacon_interval * TIME_UNIT;
4899 
4900     if (!interval || !usec)
4901         return 0;
4902 
4903     quot =
4904         (usec /
4905          interval) & (il_beacon_time_mask_high(il,
4906                            il->hw_params.
4907                            beacon_time_tsf_bits) >> il->
4908                   hw_params.beacon_time_tsf_bits);
4909     rem =
4910         (usec % interval) & il_beacon_time_mask_low(il,
4911                                 il->hw_params.
4912                                 beacon_time_tsf_bits);
4913 
4914     return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4915 }
4916 EXPORT_SYMBOL(il_usecs_to_beacons);
4917 
4918 /* base is usually what we get from the uCode with each received frame;
4919  * it is the same as the HW timer counter counting down
4920  */
4921 __le32
4922 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4923            u32 beacon_interval)
4924 {
4925     u32 base_low = base & il_beacon_time_mask_low(il,
4926                           il->hw_params.
4927                           beacon_time_tsf_bits);
4928     u32 addon_low = addon & il_beacon_time_mask_low(il,
4929                             il->hw_params.
4930                             beacon_time_tsf_bits);
4931     u32 interval = beacon_interval * TIME_UNIT;
4932     u32 res = (base & il_beacon_time_mask_high(il,
4933                            il->hw_params.
4934                            beacon_time_tsf_bits)) +
4935         (addon & il_beacon_time_mask_high(il,
4936                           il->hw_params.
4937                           beacon_time_tsf_bits));
4938 
4939     if (base_low > addon_low)
4940         res += base_low - addon_low;
4941     else if (base_low < addon_low) {
4942         res += interval + base_low - addon_low;
4943         res += (1 << il->hw_params.beacon_time_tsf_bits);
4944     } else
4945         res += (1 << il->hw_params.beacon_time_tsf_bits);
4946 
4947     return cpu_to_le32(res);
4948 }
4949 EXPORT_SYMBOL(il_add_beacon_time);
4950 
4951 #ifdef CONFIG_PM_SLEEP
4952 
4953 static int
4954 il_pci_suspend(struct device *device)
4955 {
4956     struct pci_dev *pdev = to_pci_dev(device);
4957     struct il_priv *il = pci_get_drvdata(pdev);
4958 
4959     /*
4960      * This function is called when the system goes into suspend state.
4961      * mac80211 will call il_mac_stop() from its suspend function first,
4962      * but since il_mac_stop() has no knowledge of who the caller is,
4963      * it will not call apm_ops.stop() to stop the DMA operation.
4964      * Call apm_ops.stop() here to make sure the DMA is stopped.
4965      */
4966     il_apm_stop(il);
4967 
4968     return 0;
4969 }
4970 
4971 static int
4972 il_pci_resume(struct device *device)
4973 {
4974     struct pci_dev *pdev = to_pci_dev(device);
4975     struct il_priv *il = pci_get_drvdata(pdev);
4976     bool hw_rfkill = false;
4977 
4978     /*
4979      * We disable the RETRY_TIMEOUT register (0x41) to keep
4980      * PCI Tx retries from interfering with C3 CPU state.
4981      */
4982     pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4983 
4984     il_enable_interrupts(il);
4985 
4986     if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4987         hw_rfkill = true;
4988 
4989     if (hw_rfkill)
4990         set_bit(S_RFKILL, &il->status);
4991     else
4992         clear_bit(S_RFKILL, &il->status);
4993 
4994     wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
4995 
4996     return 0;
4997 }
4998 
4999 SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
5000 EXPORT_SYMBOL(il_pm_ops);
5001 
5002 #endif /* CONFIG_PM_SLEEP */
5003 
5004 static void
5005 il_update_qos(struct il_priv *il)
5006 {
5007     if (test_bit(S_EXIT_PENDING, &il->status))
5008         return;
5009 
5010     il->qos_data.def_qos_parm.qos_flags = 0;
5011 
5012     if (il->qos_data.qos_active)
5013         il->qos_data.def_qos_parm.qos_flags |=
5014             QOS_PARAM_FLG_UPDATE_EDCA_MSK;
5015 
5016     if (il->ht.enabled)
5017         il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
5018 
5019     D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
5020           il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);
5021 
5022     il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
5023                   &il->qos_data.def_qos_parm, NULL);
5024 }
5025 
5026 /**
5027  * il_mac_config - mac80211 config callback
5028  */
5029 int
5030 il_mac_config(struct ieee80211_hw *hw, u32 changed)
5031 {
5032     struct il_priv *il = hw->priv;
5033     const struct il_channel_info *ch_info;
5034     struct ieee80211_conf *conf = &hw->conf;
5035     struct ieee80211_channel *channel = conf->chandef.chan;
5036     struct il_ht_config *ht_conf = &il->current_ht_config;
5037     unsigned long flags = 0;
5038     int ret = 0;
5039     u16 ch;
5040     int scan_active = 0;
5041     bool ht_changed = false;
5042 
5043     mutex_lock(&il->mutex);
5044     D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
5045            changed);
5046 
5047     if (unlikely(test_bit(S_SCANNING, &il->status))) {
5048         scan_active = 1;
5049         D_MAC80211("scan active\n");
5050     }
5051 
5052     if (changed &
5053         (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
5054         /* mac80211 uses static SM PS for non-HT, which is what we want */
5055         il->current_ht_config.smps = conf->smps_mode;
5056 
5057         /*
5058          * Recalculate chain counts.
5059          *
5060          * If monitor mode is enabled then mac80211 will
5061          * set up the SM PS mode to OFF if an HT channel is
5062          * configured.
5063          */
5064         if (il->ops->set_rxon_chain)
5065             il->ops->set_rxon_chain(il);
5066     }
5067 
5068     /* during scanning mac80211 will delay channel setting until
5069      * the scan is finished, calling us again with changed = 0
5070      */
5071     if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
5072 
5073         if (scan_active)
5074             goto set_ch_out;
5075 
5076         ch = channel->hw_value;
5077         ch_info = il_get_channel_info(il, channel->band, ch);
5078         if (!il_is_channel_valid(ch_info)) {
5079             D_MAC80211("leave - invalid channel\n");
5080             ret = -EINVAL;
5081             goto set_ch_out;
5082         }
5083 
5084         if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
5085             !il_is_channel_ibss(ch_info)) {
5086             D_MAC80211("leave - not IBSS channel\n");
5087             ret = -EINVAL;
5088             goto set_ch_out;
5089         }
5090 
5091         spin_lock_irqsave(&il->lock, flags);
5092 
5093         /* Configure HT40 channels */
5094         if (il->ht.enabled != conf_is_ht(conf)) {
5095             il->ht.enabled = conf_is_ht(conf);
5096             ht_changed = true;
5097         }
5098         if (il->ht.enabled) {
5099             if (conf_is_ht40_minus(conf)) {
5100                 il->ht.extension_chan_offset =
5101                     IEEE80211_HT_PARAM_CHA_SEC_BELOW;
5102                 il->ht.is_40mhz = true;
5103             } else if (conf_is_ht40_plus(conf)) {
5104                 il->ht.extension_chan_offset =
5105                     IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
5106                 il->ht.is_40mhz = true;
5107             } else {
5108                 il->ht.extension_chan_offset =
5109                     IEEE80211_HT_PARAM_CHA_SEC_NONE;
5110                 il->ht.is_40mhz = false;
5111             }
5112         } else
5113             il->ht.is_40mhz = false;
5114 
5115         /*
5116          * Default to no protection. Protection mode will
5117          * later be set from BSS config in il_ht_conf
5118          */
5119         il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
5120 
5121         /* if we are switching from HT to 2.4, clear the flags
5122          * from any HT-related info, since 2.4 does not
5123          * support HT */
5124         if ((le16_to_cpu(il->staging.channel) != ch))
5125             il->staging.flags = 0;
5126 
5127         il_set_rxon_channel(il, channel);
5128         il_set_rxon_ht(il, ht_conf);
5129 
5130         il_set_flags_for_band(il, channel->band, il->vif);
5131 
5132         spin_unlock_irqrestore(&il->lock, flags);
5133 
5134         if (il->ops->update_bcast_stations)
5135             ret = il->ops->update_bcast_stations(il);
5136 
5137 set_ch_out:
5138         /* The list of supported rates and rate mask can be different
5139          * for each band; since the band may have changed, reset
5140          * the rate mask to what mac80211 lists */
5141         il_set_rate(il);
5142     }
5143 
5144     if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
5145         il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
5146         ret = il_power_update_mode(il, false);
5147         if (ret)
5148             D_MAC80211("Error setting sleep level\n");
5149     }
5150 
5151     if (changed & IEEE80211_CONF_CHANGE_POWER) {
5152         D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
5153                conf->power_level);
5154 
5155         il_set_tx_power(il, conf->power_level, false);
5156     }
5157 
5158     if (!il_is_ready(il)) {
5159         D_MAC80211("leave - not ready\n");
5160         goto out;
5161     }
5162 
5163     if (scan_active)
5164         goto out;
5165 
5166     if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
5167         il_commit_rxon(il);
5168     else
5169         D_INFO("Not re-sending same RXON configuration.\n");
5170     if (ht_changed)
5171         il_update_qos(il);
5172 
5173 out:
5174     D_MAC80211("leave ret %d\n", ret);
5175     mutex_unlock(&il->mutex);
5176 
5177     return ret;
5178 }
5179 EXPORT_SYMBOL(il_mac_config);
5180 
5181 void
5182 il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5183 {
5184     struct il_priv *il = hw->priv;
5185     unsigned long flags;
5186 
5187     mutex_lock(&il->mutex);
5188     D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
5189 
5190     spin_lock_irqsave(&il->lock, flags);
5191 
5192     memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
5193 
5194     /* on a new association, get rid of the IBSS beacon skb */
5195     if (il->beacon_skb)
5196         dev_kfree_skb(il->beacon_skb);
5197     il->beacon_skb = NULL;
5198     il->timestamp = 0;
5199 
5200     spin_unlock_irqrestore(&il->lock, flags);
5201 
5202     il_scan_cancel_timeout(il, 100);
5203     if (!il_is_ready_rf(il)) {
5204         D_MAC80211("leave - not ready\n");
5205         mutex_unlock(&il->mutex);
5206         return;
5207     }
5208 
5209     /* we are restarting the association process */
5210     il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5211     il_commit_rxon(il);
5212 
5213     il_set_rate(il);
5214 
5215     D_MAC80211("leave\n");
5216     mutex_unlock(&il->mutex);
5217 }
5218 EXPORT_SYMBOL(il_mac_reset_tsf);
5219 
5220 static void
5221 il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5222 {
5223     struct il_ht_config *ht_conf = &il->current_ht_config;
5224     struct ieee80211_sta *sta;
5225     struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
5226 
5227     D_ASSOC("enter:\n");
5228 
5229     if (!il->ht.enabled)
5230         return;
5231 
5232     il->ht.protection =
5233         bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
5234     il->ht.non_gf_sta_present =
5235         !!(bss_conf->
5236            ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5237 
5238     ht_conf->single_chain_sufficient = false;
5239 
5240     switch (vif->type) {
5241     case NL80211_IFTYPE_STATION:
5242         rcu_read_lock();
5243         sta = ieee80211_find_sta(vif, bss_conf->bssid);
5244         if (sta) {
5245             struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
5246             int maxstreams;
5247 
5248             maxstreams =
5249                 (ht_cap->mcs.
5250                  tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
5251                 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
5252             maxstreams += 1;
5253 
5254             if (ht_cap->mcs.rx_mask[1] == 0 &&
5255                 ht_cap->mcs.rx_mask[2] == 0)
5256                 ht_conf->single_chain_sufficient = true;
5257             if (maxstreams <= 1)
5258                 ht_conf->single_chain_sufficient = true;
5259         } else {
5260             /*
5261              * If at all, this can only happen through a race
5262              * when the AP disconnects us while we're still
5263              * setting up the connection; in that case mac80211
5264              * will soon tell us about it.
5265              */
5266             ht_conf->single_chain_sufficient = true;
5267         }
5268         rcu_read_unlock();
5269         break;
5270     case NL80211_IFTYPE_ADHOC:
5271         ht_conf->single_chain_sufficient = true;
5272         break;
5273     default:
5274         break;
5275     }
5276 
5277     D_ASSOC("leave\n");
5278 }
5279 
5280 static inline void
5281 il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
5282 {
5283     /*
5284      * inform the ucode that there is no longer an
5285      * association and that no more packets should be
5286      * sent
5287      */
5288     il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5289     il->staging.assoc_id = 0;
5290     il_commit_rxon(il);
5291 }
5292 
5293 static void
5294 il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5295 {
5296     struct il_priv *il = hw->priv;
5297     unsigned long flags;
5298     __le64 timestamp;
5299     struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
5300 
5301     if (!skb)
5302         return;
5303 
5304     D_MAC80211("enter\n");
5305 
5306     lockdep_assert_held(&il->mutex);
5307 
5308     if (!il->beacon_enabled) {
5309         IL_ERR("update beacon with no beaconing enabled\n");
5310         dev_kfree_skb(skb);
5311         return;
5312     }
5313 
5314     spin_lock_irqsave(&il->lock, flags);
5315 
5316     if (il->beacon_skb)
5317         dev_kfree_skb(il->beacon_skb);
5318 
5319     il->beacon_skb = skb;
5320 
5321     timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
5322     il->timestamp = le64_to_cpu(timestamp);
5323 
5324     D_MAC80211("leave\n");
5325     spin_unlock_irqrestore(&il->lock, flags);
5326 
5327     if (!il_is_ready_rf(il)) {
5328         D_MAC80211("leave - RF not ready\n");
5329         return;
5330     }
5331 
5332     il->ops->post_associate(il);
5333 }
5334 
5335 void
5336 il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5337             struct ieee80211_bss_conf *bss_conf, u32 changes)
5338 {
5339     struct il_priv *il = hw->priv;
5340     int ret;
5341 
5342     mutex_lock(&il->mutex);
5343     D_MAC80211("enter: changes 0x%x\n", changes);
5344 
5345     if (!il_is_alive(il)) {
5346         D_MAC80211("leave - not alive\n");
5347         mutex_unlock(&il->mutex);
5348         return;
5349     }
5350 
5351     if (changes & BSS_CHANGED_QOS) {
5352         unsigned long flags;
5353 
5354         spin_lock_irqsave(&il->lock, flags);
5355         il->qos_data.qos_active = bss_conf->qos;
5356         il_update_qos(il);
5357         spin_unlock_irqrestore(&il->lock, flags);
5358     }
5359 
5360     if (changes & BSS_CHANGED_BEACON_ENABLED) {
5361         /* FIXME: can we remove beacon_enabled ? */
5362         if (vif->bss_conf.enable_beacon)
5363             il->beacon_enabled = true;
5364         else
5365             il->beacon_enabled = false;
5366     }
5367 
5368     if (changes & BSS_CHANGED_BSSID) {
5369         D_MAC80211("BSSID %pM\n", bss_conf->bssid);
5370 
5371         /*
5372          * On a passive channel we wait with blocked queues to see if
5373          * there is traffic on that channel. If no frame is
5374          * received (very unlikely, since the scan detected an AP on
5375          * that channel, but theoretically possible), the mac80211
5376          * association procedure will time out and mac80211 will call
5377          * us with a NULL bssid. We have to unblock the queues then.
5378          */
5379         if (is_zero_ether_addr(bss_conf->bssid))
5380             il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
5381 
5382         /*
5383          * If there is currently an HW scan going on in the background,
5384          * we need to cancel it; otherwise we are sometimes not
5385          * able to authenticate (FIXME: why?)
5386 */ 5387 if (il_scan_cancel_timeout(il, 100)) { 5388 D_MAC80211("leave - scan abort failed\n"); 5389 mutex_unlock(&il->mutex); 5390 return; 5391 } 5392 5393 /* mac80211 only sets assoc when in STATION mode */ 5394 memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); 5395 5396 /* FIXME: currently needed in a few places */ 5397 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); 5398 } 5399 5400 /* 5401 * This needs to be after setting the BSSID in case 5402 * mac80211 decides to do both changes at once because 5403 * it will invoke post_associate. 5404 */ 5405 if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON)) 5406 il_beacon_update(hw, vif); 5407 5408 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 5409 D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble); 5410 if (bss_conf->use_short_preamble) 5411 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 5412 else 5413 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 5414 } 5415 5416 if (changes & BSS_CHANGED_ERP_CTS_PROT) { 5417 D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot); 5418 if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ) 5419 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; 5420 else 5421 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 5422 if (bss_conf->use_cts_prot) 5423 il->staging.flags |= RXON_FLG_SELF_CTS_EN; 5424 else 5425 il->staging.flags &= ~RXON_FLG_SELF_CTS_EN; 5426 } 5427 5428 if (changes & BSS_CHANGED_BASIC_RATES) { 5429 /* XXX use this information 5430 * 5431 * To do that, remove code from il_set_rate() and put something 5432 * like this here: 5433 * 5434 if (A-band) 5435 il->staging.ofdm_basic_rates = 5436 bss_conf->basic_rates; 5437 else 5438 il->staging.ofdm_basic_rates = 5439 bss_conf->basic_rates >> 4; 5440 il->staging.cck_basic_rates = 5441 bss_conf->basic_rates & 0xF; 5442 */ 5443 } 5444 5445 if (changes & BSS_CHANGED_HT) { 5446 il_ht_conf(il, vif); 5447 5448 if (il->ops->set_rxon_chain) 5449 il->ops->set_rxon_chain(il); 5450 } 5451 5452 if (changes & BSS_CHANGED_ASSOC) { 5453 D_MAC80211("ASSOC %d\n", bss_conf->assoc); 5454 if (bss_conf->assoc) { 5455 il->timestamp = bss_conf->sync_tsf; 5456 5457 if (!il_is_rfkill(il)) 5458 il->ops->post_associate(il); 5459 } else 5460 il_set_no_assoc(il, vif); 5461 } 5462 5463 if (changes && il_is_associated(il) && bss_conf->aid) { 5464 D_MAC80211("Changes (%#x) while associated\n", changes); 5465 ret = il_send_rxon_assoc(il); 5466 if (!ret) { 5467 /* Sync active_rxon with latest change. */ 5468 memcpy((void *)&il->active, &il->staging, 5469 sizeof(struct il_rxon_cmd)); 5470 } 5471 } 5472 5473 if (changes & BSS_CHANGED_BEACON_ENABLED) { 5474 if (vif->bss_conf.enable_beacon) { 5475 memcpy(il->staging.bssid_addr, bss_conf->bssid, 5476 ETH_ALEN); 5477 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); 5478 il->ops->config_ap(il); 5479 } else 5480 il_set_no_assoc(il, vif); 5481 } 5482 5483 if (changes & BSS_CHANGED_IBSS) { 5484 ret = il->ops->manage_ibss_station(il, vif, 5485 bss_conf->ibss_joined); 5486 if (ret) 5487 IL_ERR("failed to %s IBSS station %pM\n", 5488 bss_conf->ibss_joined ? "add" : "remove", 5489 bss_conf->bssid); 5490 } 5491 5492 D_MAC80211("leave\n"); 5493 mutex_unlock(&il->mutex); 5494 } 5495 EXPORT_SYMBOL(il_mac_bss_info_changed); 5496 5497 irqreturn_t 5498 il_isr(int irq, void *data) 5499 { 5500 struct il_priv *il = data; 5501 u32 inta, inta_mask; 5502 u32 inta_fh; 5503 unsigned long flags; 5504 if (!il) 5505 return IRQ_NONE; 5506 5507 spin_lock_irqsave(&il->lock, flags); 5508 5509 /* Disable (but don't clear!) 
interrupts here to avoid
5510  * back-to-back ISRs and sporadic interrupts from our NIC.
5511  * If we have something to service, the tasklet will re-enable ints.
5512  * If we *don't* have something, we'll re-enable before leaving here. */
5513     inta_mask = _il_rd(il, CSR_INT_MASK);  /* just for debug */
5514     _il_wr(il, CSR_INT_MASK, 0x00000000);
5515 
5516     /* Discover which interrupts are active/pending */
5517     inta = _il_rd(il, CSR_INT);
5518     inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
5519 
5520     /* Ignore interrupt if there's nothing in NIC to service.
5521      * This may be due to IRQ shared with another device,
5522      * or due to sporadic interrupts thrown from our NIC. */
5523     if (!inta && !inta_fh) {
5524         D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5525         goto none;
5526     }
5527 
5528     if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
5529         /* Hardware disappeared. It might have already raised
5530          * an interrupt */
5531         IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
5532         goto unplugged;
5533     }
5534 
5535     D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
5536           inta_fh);
5537 
5538     inta &= ~CSR_INT_BIT_SCD;
5539 
5540     /* il_irq_tasklet() will service interrupts and re-enable them */
5541     if (likely(inta || inta_fh))
5542         tasklet_schedule(&il->irq_tasklet);
5543 
5544 unplugged:
5545     spin_unlock_irqrestore(&il->lock, flags);
5546     return IRQ_HANDLED;
5547 
5548 none:
5549     /* Re-enable interrupts here since we don't have anything to service. */
5550     /* Only re-enable if they were disabled by this ISR (S_INT_ENABLED set). */
5551     if (test_bit(S_INT_ENABLED, &il->status))
5552         il_enable_interrupts(il);
5553     spin_unlock_irqrestore(&il->lock, flags);
5554     return IRQ_NONE;
5555 }
5556 EXPORT_SYMBOL(il_isr);
5557 
5558 /*
5559  * il_tx_cmd_protection: Set RTS/CTS protection. Only 3945 and 4965
5560  * share this function.
5561  */
5562 void
5563 il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5564              __le16 fc, __le32 *tx_flags)
5565 {
5566     if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5567         *tx_flags |= TX_CMD_FLG_RTS_MSK;
5568         *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5569         *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5570 
5571         if (!ieee80211_is_mgmt(fc))
5572             return;
5573 
5574         switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5575         case cpu_to_le16(IEEE80211_STYPE_AUTH):
5576         case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5577         case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5578         case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5579             *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5580             *tx_flags |= TX_CMD_FLG_CTS_MSK;
5581             break;
5582         }
5583     } else if (info->control.rates[0].
5584            flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5585         *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5586         *tx_flags |= TX_CMD_FLG_CTS_MSK;
5587         *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5588     }
5589 }
5590 EXPORT_SYMBOL(il_tx_cmd_protection);
5591 
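/*
 * Illustrative summary of il_tx_cmd_protection() (documentation only,
 * not driver logic):
 *
 *   rate-control flag                 frame type              protection
 *   IEEE80211_TX_RC_USE_RTS_CTS       data / other mgmt       RTS/CTS
 *   IEEE80211_TX_RC_USE_RTS_CTS       auth, deauth,
 *                                     (re)assoc request       CTS-to-self
 *   IEEE80211_TX_RC_USE_CTS_PROTECT   any frame               CTS-to-self
 *   neither flag                      any frame               no protection
 */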