/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/ieee80211_radiotap.h>
#include <net/mac80211.h>

#include <asm/div64.h>

#define DRV_NAME "iwl3945"

#include "commands.h"
#include "common.h"
#include "3945.h"
#include "iwl-spectrum.h"

/*
 * module name, copyright, version, etc.
 */

#define DRV_DESCRIPTION \
"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"

#ifdef CONFIG_IWLEGACY_DEBUG
#define VD "d"
#else
#define VD
#endif

/*
 * add "s" to indicate spectrum measurement included.
 * we add it here to be consistent with previous releases in which
 * this was configurable.
 */
#define DRV_VERSION IWLWIFI_VERSION VD "s"
#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

/* module parameters */
struct il_mod_params il3945_mod_params = {
	.sw_crypto = 1,
	.restart_fw = 1,
	.disable_hw_scan = 1,
	/* the rest are 0 by default */
};

/**
 * il3945_get_antenna_flags - Get antenna flags for RXON command
 * @il: eeprom and antenna fields are used to determine antenna flags
 *
 * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
 * il3945_mod_params.antenna specifies the antenna diversity mode:
 *
 * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
 * IL_ANTENNA_MAIN      - Force MAIN antenna
 * IL_ANTENNA_AUX       - Force AUX antenna
 */
__le32
il3945_get_antenna_flags(const struct il_priv *il)
{
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;

	switch (il3945_mod_params.antenna) {
	case IL_ANTENNA_DIVERSITY:
		return 0;

	case IL_ANTENNA_MAIN:
		if (eeprom->antenna_switch_type)
			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;

	case IL_ANTENNA_AUX:
		if (eeprom->antenna_switch_type)
			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
	}

	/* bad antenna selector value */
	IL_ERR("Bad antenna selector value (0x%x)\n",
	       il3945_mod_params.antenna);

	return 0;		/* "diversity" is default if error */
}

static int
il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	int ret;

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = keyconf->keyidx;
	key_flags &= ~STA_KEY_FLG_INVALID;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode.
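	 * The previously assigned key_offset is kept; the new key material
	 * copied above simply overwrites the old key in that uCode slot.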
*/ 163 164 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 165 "no space for a new key"); 166 167 il->stations[sta_id].sta.key.key_flags = key_flags; 168 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 169 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 170 171 D_INFO("hwcrypto: modify ucode station key info\n"); 172 173 ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); 174 175 spin_unlock_irqrestore(&il->sta_lock, flags); 176 177 return ret; 178 } 179 180 static int 181 il3945_set_tkip_dynamic_key_info(struct il_priv *il, 182 struct ieee80211_key_conf *keyconf, u8 sta_id) 183 { 184 return -EOPNOTSUPP; 185 } 186 187 static int 188 il3945_set_wep_dynamic_key_info(struct il_priv *il, 189 struct ieee80211_key_conf *keyconf, u8 sta_id) 190 { 191 return -EOPNOTSUPP; 192 } 193 194 static int 195 il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id) 196 { 197 unsigned long flags; 198 struct il_addsta_cmd sta_cmd; 199 200 spin_lock_irqsave(&il->sta_lock, flags); 201 memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key)); 202 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); 203 il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 204 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 205 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 206 memcpy(&sta_cmd, &il->stations[sta_id].sta, 207 sizeof(struct il_addsta_cmd)); 208 spin_unlock_irqrestore(&il->sta_lock, flags); 209 210 D_INFO("hwcrypto: clear ucode station key info\n"); 211 return il_send_add_sta(il, &sta_cmd, CMD_SYNC); 212 } 213 214 static int 215 il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf, 216 u8 sta_id) 217 { 218 int ret = 0; 219 220 keyconf->hw_key_idx = HW_KEY_DYNAMIC; 221 222 switch (keyconf->cipher) { 223 case WLAN_CIPHER_SUITE_CCMP: 224 ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id); 225 break; 226 case WLAN_CIPHER_SUITE_TKIP: 227 ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id); 228 break; 229 case WLAN_CIPHER_SUITE_WEP40: 230 case WLAN_CIPHER_SUITE_WEP104: 231 ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id); 232 break; 233 default: 234 IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher); 235 ret = -EINVAL; 236 } 237 238 D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", 239 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); 240 241 return ret; 242 } 243 244 static int 245 il3945_remove_static_key(struct il_priv *il) 246 { 247 int ret = -EOPNOTSUPP; 248 249 return ret; 250 } 251 252 static int 253 il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key) 254 { 255 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || 256 key->cipher == WLAN_CIPHER_SUITE_WEP104) 257 return -EOPNOTSUPP; 258 259 IL_ERR("Static key invalid: cipher %x\n", key->cipher); 260 return -EINVAL; 261 } 262 263 static void 264 il3945_clear_free_frames(struct il_priv *il) 265 { 266 struct list_head *element; 267 268 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); 269 270 while (!list_empty(&il->free_frames)) { 271 element = il->free_frames.next; 272 list_del(element); 273 kfree(list_entry(element, struct il3945_frame, list)); 274 il->frames_count--; 275 } 276 277 if (il->frames_count) { 278 IL_WARN("%d frames still in use. 
Did we lose one?\n",
			il->frames_count);
		il->frames_count = 0;
	}
}

static struct il3945_frame *
il3945_get_free_frame(struct il_priv *il)
{
	struct il3945_frame *frame;
	struct list_head *element;
	if (list_empty(&il->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
			IL_ERR("Could not allocate frame!\n");
			return NULL;
		}

		il->frames_count++;
		return frame;
	}

	element = il->free_frames.next;
	list_del(element);
	return list_entry(element, struct il3945_frame, list);
}

static void
il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}

unsigned int
il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
			 int left)
{

	if (!il_is_associated(il) || !il->beacon_skb)
		return 0;

	if (il->beacon_skb->len > left)
		return 0;

	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);

	return il->beacon_skb->len;
}

static int
il3945_send_beacon_cmd(struct il_priv *il)
{
	struct il3945_frame *frame;
	unsigned int frame_size;
	int rc;
	u8 rate;

	frame = il3945_get_free_frame(il);

	if (!frame) {
		IL_ERR("Could not obtain free frame buffer for beacon "
		       "command.\n");
		return -ENOMEM;
	}

	rate = il_get_lowest_plcp(il);

	frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);

	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);

	il3945_free_frame(il, frame);

	return rc;
}

static void
il3945_unset_hw_params(struct il_priv *il)
{
	if (il->_3945.shared_virt)
		dma_free_coherent(&il->pci_dev->dev,
				  sizeof(struct il3945_shared),
				  il->_3945.shared_virt, il->_3945.shared_phys);
}

static void
il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_device_cmd *cmd,
			     struct sk_buff *skb_frag, int sta_id)
{
	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
	struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;

	tx_cmd->sec_ctl = 0;

	switch (keyinfo->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    TX_CMD_SEC_WEP | (info->control.hw_key->
				      hw_key_idx & TX_CMD_SEC_MSK) <<
		    TX_CMD_SEC_SHIFT;

		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     info->control.hw_key->hw_key_idx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
		break;
	}
}

/*
 * handle build C_TX command notification.
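 * Sets the per-frame flags: ACK vs. no-ACK handling, sequence-control
 * ownership, the TID taken from the QoS control field, and a short
 * pm_frame_timeout for association/reassociation requests.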
407 */ 408 static void 409 il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd, 410 struct ieee80211_tx_info *info, 411 struct ieee80211_hdr *hdr, u8 std_id) 412 { 413 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; 414 __le32 tx_flags = tx_cmd->tx_flags; 415 __le16 fc = hdr->frame_control; 416 417 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 418 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 419 tx_flags |= TX_CMD_FLG_ACK_MSK; 420 if (ieee80211_is_mgmt(fc)) 421 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 422 if (ieee80211_is_probe_resp(fc) && 423 !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) 424 tx_flags |= TX_CMD_FLG_TSF_MSK; 425 } else { 426 tx_flags &= (~TX_CMD_FLG_ACK_MSK); 427 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 428 } 429 430 tx_cmd->sta_id = std_id; 431 if (ieee80211_has_morefrags(fc)) 432 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 433 434 if (ieee80211_is_data_qos(fc)) { 435 u8 *qc = ieee80211_get_qos_ctl(hdr); 436 tx_cmd->tid_tspec = qc[0] & 0xf; 437 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 438 } else { 439 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 440 } 441 442 il_tx_cmd_protection(il, info, fc, &tx_flags); 443 444 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 445 if (ieee80211_is_mgmt(fc)) { 446 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 447 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); 448 else 449 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); 450 } else { 451 tx_cmd->timeout.pm_frame_timeout = 0; 452 } 453 454 tx_cmd->driver_txop = 0; 455 tx_cmd->tx_flags = tx_flags; 456 tx_cmd->next_frame_len = 0; 457 } 458 459 /* 460 * start C_TX command process 461 */ 462 static int 463 il3945_tx_skb(struct il_priv *il, 464 struct ieee80211_sta *sta, 465 struct sk_buff *skb) 466 { 467 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 469 struct il3945_tx_cmd *tx_cmd; 470 struct il_tx_queue *txq = NULL; 471 struct il_queue *q = NULL; 472 struct il_device_cmd *out_cmd; 473 struct il_cmd_meta *out_meta; 474 dma_addr_t phys_addr; 475 dma_addr_t txcmd_phys; 476 int txq_id = skb_get_queue_mapping(skb); 477 u16 len, idx, hdr_len; 478 u16 firstlen, secondlen; 479 u8 id; 480 u8 unicast; 481 u8 sta_id; 482 u8 tid = 0; 483 __le16 fc; 484 u8 wait_write_ptr = 0; 485 unsigned long flags; 486 487 spin_lock_irqsave(&il->lock, flags); 488 if (il_is_rfkill(il)) { 489 D_DROP("Dropping - RF KILL\n"); 490 goto drop_unlock; 491 } 492 493 if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) == 494 IL_INVALID_RATE) { 495 IL_ERR("ERROR: No TX rate available.\n"); 496 goto drop_unlock; 497 } 498 499 unicast = !is_multicast_ether_addr(hdr->addr1); 500 id = 0; 501 502 fc = hdr->frame_control; 503 504 #ifdef CONFIG_IWLEGACY_DEBUG 505 if (ieee80211_is_auth(fc)) 506 D_TX("Sending AUTH frame\n"); 507 else if (ieee80211_is_assoc_req(fc)) 508 D_TX("Sending ASSOC frame\n"); 509 else if (ieee80211_is_reassoc_req(fc)) 510 D_TX("Sending REASSOC frame\n"); 511 #endif 512 513 spin_unlock_irqrestore(&il->lock, flags); 514 515 hdr_len = ieee80211_hdrlen(fc); 516 517 /* Find idx into station table for destination station */ 518 sta_id = il_sta_id_or_broadcast(il, sta); 519 if (sta_id == IL_INVALID_STATION) { 520 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); 521 goto drop; 522 } 523 524 D_RATE("station Id %d\n", sta_id); 525 526 if (ieee80211_is_data_qos(fc)) { 527 u8 *qc = ieee80211_get_qos_ctl(hdr); 528 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 529 if (unlikely(tid >= MAX_TID_COUNT)) 530 goto 
drop; 531 } 532 533 /* Descriptor for chosen Tx queue */ 534 txq = &il->txq[txq_id]; 535 q = &txq->q; 536 537 if ((il_queue_space(q) < q->high_mark)) 538 goto drop; 539 540 spin_lock_irqsave(&il->lock, flags); 541 542 idx = il_get_cmd_idx(q, q->write_ptr, 0); 543 544 txq->skbs[q->write_ptr] = skb; 545 546 /* Init first empty entry in queue's array of Tx/cmd buffers */ 547 out_cmd = txq->cmd[idx]; 548 out_meta = &txq->meta[idx]; 549 tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload; 550 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 551 memset(tx_cmd, 0, sizeof(*tx_cmd)); 552 553 /* 554 * Set up the Tx-command (not MAC!) header. 555 * Store the chosen Tx queue and TFD idx within the sequence field; 556 * after Tx, uCode's Tx response will return this value so driver can 557 * locate the frame within the tx queue and do post-tx processing. 558 */ 559 out_cmd->hdr.cmd = C_TX; 560 out_cmd->hdr.sequence = 561 cpu_to_le16((u16) 562 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr))); 563 564 /* Copy MAC header from skb into command buffer */ 565 memcpy(tx_cmd->hdr, hdr, hdr_len); 566 567 if (info->control.hw_key) 568 il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id); 569 570 /* TODO need this for burst mode later on */ 571 il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id); 572 573 il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id); 574 575 /* Total # bytes to be transmitted */ 576 tx_cmd->len = cpu_to_le16((u16) skb->len); 577 578 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 579 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 580 581 /* 582 * Use the first empty entry in this queue's command buffer array 583 * to contain the Tx command and MAC header concatenated together 584 * (payload data will be in another buffer). 585 * Size of this varies, due to varying MAC header length. 586 * If end is not dword aligned, we'll have 2 extra bytes at the end 587 * of the MAC header (device reads on dword boundaries). 588 * We'll tell device about this padding later. 589 */ 590 len = 591 sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + 592 hdr_len; 593 firstlen = (len + 3) & ~3; 594 595 /* Physical address of this Tx command's header (not MAC header!), 596 * within command buffer array. */ 597 txcmd_phys = 598 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, 599 PCI_DMA_TODEVICE); 600 if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) 601 goto drop_unlock; 602 603 /* Set up TFD's 2nd entry to point directly to remainder of skb, 604 * if any (802.11 null frames have no payload). */ 605 secondlen = skb->len - hdr_len; 606 if (secondlen > 0) { 607 phys_addr = 608 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, 609 PCI_DMA_TODEVICE); 610 if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) 611 goto drop_unlock; 612 } 613 614 /* Add buffer containing Tx command and MAC(!) 
header to TFD's 615 * first entry */ 616 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); 617 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 618 dma_unmap_len_set(out_meta, len, firstlen); 619 if (secondlen > 0) 620 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0, 621 U32_PAD(secondlen)); 622 623 if (!ieee80211_has_morefrags(hdr->frame_control)) { 624 txq->need_update = 1; 625 } else { 626 wait_write_ptr = 1; 627 txq->need_update = 0; 628 } 629 630 il_update_stats(il, true, fc, skb->len); 631 632 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); 633 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); 634 il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd)); 635 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, 636 ieee80211_hdrlen(fc)); 637 638 /* Tell device the write idx *just past* this latest filled TFD */ 639 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); 640 il_txq_update_write_ptr(il, txq); 641 spin_unlock_irqrestore(&il->lock, flags); 642 643 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { 644 if (wait_write_ptr) { 645 spin_lock_irqsave(&il->lock, flags); 646 txq->need_update = 1; 647 il_txq_update_write_ptr(il, txq); 648 spin_unlock_irqrestore(&il->lock, flags); 649 } 650 651 il_stop_queue(il, txq); 652 } 653 654 return 0; 655 656 drop_unlock: 657 spin_unlock_irqrestore(&il->lock, flags); 658 drop: 659 return -1; 660 } 661 662 static int 663 il3945_get_measurement(struct il_priv *il, 664 struct ieee80211_measurement_params *params, u8 type) 665 { 666 struct il_spectrum_cmd spectrum; 667 struct il_rx_pkt *pkt; 668 struct il_host_cmd cmd = { 669 .id = C_SPECTRUM_MEASUREMENT, 670 .data = (void *)&spectrum, 671 .flags = CMD_WANT_SKB, 672 }; 673 u32 add_time = le64_to_cpu(params->start_time); 674 int rc; 675 int spectrum_resp_status; 676 int duration = le16_to_cpu(params->duration); 677 678 if (il_is_associated(il)) 679 add_time = 680 il_usecs_to_beacons(il, 681 le64_to_cpu(params->start_time) - 682 il->_3945.last_tsf, 683 le16_to_cpu(il->timing.beacon_interval)); 684 685 memset(&spectrum, 0, sizeof(spectrum)); 686 687 spectrum.channel_count = cpu_to_le16(1); 688 spectrum.flags = 689 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; 690 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; 691 cmd.len = sizeof(spectrum); 692 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 693 694 if (il_is_associated(il)) 695 spectrum.start_time = 696 il_add_beacon_time(il, il->_3945.last_beacon_time, add_time, 697 le16_to_cpu(il->timing.beacon_interval)); 698 else 699 spectrum.start_time = 0; 700 701 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); 702 spectrum.channels[0].channel = params->channel; 703 spectrum.channels[0].type = type; 704 if (il->active.flags & RXON_FLG_BAND_24G_MSK) 705 spectrum.flags |= 706 RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | 707 RXON_FLG_TGG_PROTECT_MSK; 708 709 rc = il_send_cmd_sync(il, &cmd); 710 if (rc) 711 return rc; 712 713 pkt = (struct il_rx_pkt *)cmd.reply_page; 714 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 715 IL_ERR("Bad return from N_RX_ON_ASSOC command\n"); 716 rc = -EIO; 717 } 718 719 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); 720 switch (spectrum_resp_status) { 721 case 0: /* Command will be handled */ 722 if (pkt->u.spectrum.id != 0xff) { 723 D_INFO("Replaced existing measurement: %d\n", 724 pkt->u.spectrum.id); 725 il->measurement_status &= ~MEASUREMENT_READY; 726 } 727 il->measurement_status |= 
MEASUREMENT_ACTIVE; 728 rc = 0; 729 break; 730 731 case 1: /* Command will not be handled */ 732 rc = -EAGAIN; 733 break; 734 } 735 736 il_free_pages(il, cmd.reply_page); 737 738 return rc; 739 } 740 741 static void 742 il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) 743 { 744 struct il_rx_pkt *pkt = rxb_addr(rxb); 745 struct il_alive_resp *palive; 746 struct delayed_work *pwork; 747 748 palive = &pkt->u.alive_frame; 749 750 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", 751 palive->is_valid, palive->ver_type, palive->ver_subtype); 752 753 if (palive->ver_subtype == INITIALIZE_SUBTYPE) { 754 D_INFO("Initialization Alive received.\n"); 755 memcpy(&il->card_alive_init, &pkt->u.alive_frame, 756 sizeof(struct il_alive_resp)); 757 pwork = &il->init_alive_start; 758 } else { 759 D_INFO("Runtime Alive received.\n"); 760 memcpy(&il->card_alive, &pkt->u.alive_frame, 761 sizeof(struct il_alive_resp)); 762 pwork = &il->alive_start; 763 il3945_disable_events(il); 764 } 765 766 /* We delay the ALIVE response by 5ms to 767 * give the HW RF Kill time to activate... */ 768 if (palive->is_valid == UCODE_VALID_OK) 769 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5)); 770 else 771 IL_WARN("uCode did not respond OK.\n"); 772 } 773 774 static void 775 il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb) 776 { 777 #ifdef CONFIG_IWLEGACY_DEBUG 778 struct il_rx_pkt *pkt = rxb_addr(rxb); 779 #endif 780 781 D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status); 782 } 783 784 static void 785 il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb) 786 { 787 struct il_rx_pkt *pkt = rxb_addr(rxb); 788 struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status); 789 #ifdef CONFIG_IWLEGACY_DEBUG 790 u8 rate = beacon->beacon_notify_hdr.rate; 791 792 D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n", 793 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, 794 beacon->beacon_notify_hdr.failure_frame, 795 le32_to_cpu(beacon->ibss_mgr_status), 796 le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); 797 #endif 798 799 il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 800 801 } 802 803 /* Handle notification from uCode that card's power state is changing 804 * due to software, hardware, or critical temperature RFKILL */ 805 static void 806 il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb) 807 { 808 struct il_rx_pkt *pkt = rxb_addr(rxb); 809 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 810 unsigned long status = il->status; 811 812 IL_WARN("Card state received: HW:%s SW:%s\n", 813 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 814 (flags & SW_CARD_DISABLED) ? "Kill" : "On"); 815 816 _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 817 818 if (flags & HW_CARD_DISABLED) 819 set_bit(S_RFKILL, &il->status); 820 else 821 clear_bit(S_RFKILL, &il->status); 822 823 il_scan_cancel(il); 824 825 if ((test_bit(S_RFKILL, &status) != 826 test_bit(S_RFKILL, &il->status))) 827 wiphy_rfkill_set_hw_state(il->hw->wiphy, 828 test_bit(S_RFKILL, &il->status)); 829 else 830 wake_up(&il->wait_command_queue); 831 } 832 833 /** 834 * il3945_setup_handlers - Initialize Rx handler callbacks 835 * 836 * Setup the RX handlers for each of the reply types sent from the uCode 837 * to the host. 838 * 839 * This function chains into the hardware specific files for them to setup 840 * any hardware specific handlers as well. 
 */
static void
il3945_setup_handlers(struct il_priv *il)
{
	il->handlers[N_ALIVE] = il3945_hdl_alive;
	il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il3945_hdl_beacon;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * stats request from the host as well as for the periodic
	 * stats notifications (after received beacons) from the uCode.
	 */
	il->handlers[C_STATS] = il3945_hdl_c_stats;
	il->handlers[N_STATS] = il3945_hdl_stats;

	il_setup_rx_scan_handlers(il);
	il->handlers[N_CARD_STATE] = il3945_hdl_card_state;

	/* Set up hardware specific Rx handlers */
	il3945_hw_handler_setup(il);
}

/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * The host allocates 32 DMA target addresses and passes the host address
 * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
 * 0 to 31
 *
 * Rx Queue Indexes
 * The host/firmware share two idx registers for managing the Rx buffers.
 *
 * The READ idx maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ idx is managed by the firmware once the card is enabled.
 *
 * The WRITE idx maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * IDX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ idx
 * and fire the RX interrupt. The driver can then query the READ idx and
 * process as many packets as possible, moving the WRITE idx forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ IDX is updated (updating the
 *   'processed' and 'read' driver idxes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' idx is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   IDX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * il3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                           il3945_rx_queue_restock
 * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                           queue, updates firmware pointers, and updates
 *                           the WRITE idx. If insufficient rx_free buffers
 *                           are available, schedules il3945_rx_replenish
 *
 * -- enable interrupts --
 * ISR - il3945_rx()         Detach il_rx_bufs from pool up to the
 *                           READ IDX, detaching the SKB from the pool.
 *                           Moves the packet buffer from queue to rx_used.
 *                           Calls il3945_rx_queue_restock to refill any empty
 *                           slots.
 * ...
 *
 */

/**
 * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32
il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
{
	return cpu_to_le32((u32) dma_addr);
}

/**
 * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' idx forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void
il3945_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;
	int write;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7) ||
	    abs(rxq->write - rxq->read) > 7) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}

/**
 * il3945_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via il3945_rx_queue_restock.
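 * (il3945_rx_replenish_now() does the same allocation with GFP_ATOMIC so it
 * can be called from the interrupt tasklet while handling Rx.)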
1000 * This is called as a scheduled work item (except for during initialization) 1001 */ 1002 static void 1003 il3945_rx_allocate(struct il_priv *il, gfp_t priority) 1004 { 1005 struct il_rx_queue *rxq = &il->rxq; 1006 struct list_head *element; 1007 struct il_rx_buf *rxb; 1008 struct page *page; 1009 dma_addr_t page_dma; 1010 unsigned long flags; 1011 gfp_t gfp_mask = priority; 1012 1013 while (1) { 1014 spin_lock_irqsave(&rxq->lock, flags); 1015 if (list_empty(&rxq->rx_used)) { 1016 spin_unlock_irqrestore(&rxq->lock, flags); 1017 return; 1018 } 1019 spin_unlock_irqrestore(&rxq->lock, flags); 1020 1021 if (rxq->free_count > RX_LOW_WATERMARK) 1022 gfp_mask |= __GFP_NOWARN; 1023 1024 if (il->hw_params.rx_page_order > 0) 1025 gfp_mask |= __GFP_COMP; 1026 1027 /* Alloc a new receive buffer */ 1028 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); 1029 if (!page) { 1030 if (net_ratelimit()) 1031 D_INFO("Failed to allocate SKB buffer.\n"); 1032 if (rxq->free_count <= RX_LOW_WATERMARK && 1033 net_ratelimit()) 1034 IL_ERR("Failed to allocate SKB buffer with %0x." 1035 "Only %u free buffers remaining.\n", 1036 priority, rxq->free_count); 1037 /* We don't reschedule replenish work here -- we will 1038 * call the restock method and if it still needs 1039 * more buffers it will schedule replenish */ 1040 break; 1041 } 1042 1043 /* Get physical address of RB/SKB */ 1044 page_dma = 1045 pci_map_page(il->pci_dev, page, 0, 1046 PAGE_SIZE << il->hw_params.rx_page_order, 1047 PCI_DMA_FROMDEVICE); 1048 1049 if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) { 1050 __free_pages(page, il->hw_params.rx_page_order); 1051 break; 1052 } 1053 1054 spin_lock_irqsave(&rxq->lock, flags); 1055 1056 if (list_empty(&rxq->rx_used)) { 1057 spin_unlock_irqrestore(&rxq->lock, flags); 1058 pci_unmap_page(il->pci_dev, page_dma, 1059 PAGE_SIZE << il->hw_params.rx_page_order, 1060 PCI_DMA_FROMDEVICE); 1061 __free_pages(page, il->hw_params.rx_page_order); 1062 return; 1063 } 1064 1065 element = rxq->rx_used.next; 1066 rxb = list_entry(element, struct il_rx_buf, list); 1067 list_del(element); 1068 1069 rxb->page = page; 1070 rxb->page_dma = page_dma; 1071 list_add_tail(&rxb->list, &rxq->rx_free); 1072 rxq->free_count++; 1073 il->alloc_rxb_page++; 1074 1075 spin_unlock_irqrestore(&rxq->lock, flags); 1076 } 1077 } 1078 1079 void 1080 il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) 1081 { 1082 unsigned long flags; 1083 int i; 1084 spin_lock_irqsave(&rxq->lock, flags); 1085 INIT_LIST_HEAD(&rxq->rx_free); 1086 INIT_LIST_HEAD(&rxq->rx_used); 1087 /* Fill the rx_used queue with _all_ of the Rx buffers */ 1088 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 1089 /* In the reset function, these buffers may have been allocated 1090 * to an SKB, so we need to unmap and free potential storage */ 1091 if (rxq->pool[i].page != NULL) { 1092 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, 1093 PAGE_SIZE << il->hw_params.rx_page_order, 1094 PCI_DMA_FROMDEVICE); 1095 __il_free_pages(il, rxq->pool[i].page); 1096 rxq->pool[i].page = NULL; 1097 } 1098 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1099 } 1100 1101 /* Set us so that we have processed and used all buffers, but have 1102 * not restocked the Rx queue with fresh buffers */ 1103 rxq->read = rxq->write = 0; 1104 rxq->write_actual = 0; 1105 rxq->free_count = 0; 1106 spin_unlock_irqrestore(&rxq->lock, flags); 1107 } 1108 1109 void 1110 il3945_rx_replenish(void *data) 1111 { 1112 struct il_priv *il = data; 1113 unsigned long flags; 1114 1115 
	il3945_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il3945_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}

static void
il3945_rx_replenish_now(struct il_priv *il)
{
	il3945_rx_allocate(il, GFP_ATOMIC);

	il3945_rx_queue_restock(il);
}

/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
 * This free routine walks the list of POOL entries and if SKB is set to
 * non NULL it is unmapped and freed
 */
static void
il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}

/* Convert linear signal-to-noise ratio into dB */
static u8 ratio2dB[100] = {
/*	 0   1   2   3   4   5   6   7   8   9 */
	 0,  0,  6, 10, 12, 14, 16, 17, 18, 19,	/* 00 - 09 */
	20, 21, 22, 22, 23, 23, 24, 25, 26, 26,	/* 10 - 19 */
	26, 26, 26, 27, 27, 28, 28, 28, 29, 29,	/* 20 - 29 */
	29, 30, 30, 30, 31, 31, 31, 31, 32, 32,	/* 30 - 39 */
	32, 32, 32, 33, 33, 33, 33, 33, 34, 34,	/* 40 - 49 */
	34, 34, 34, 34, 35, 35, 35, 35, 35, 35,	/* 50 - 59 */
	36, 36, 36, 36, 36, 36, 36, 37, 37, 37,	/* 60 - 69 */
	37, 37, 37, 37, 37, 38, 38, 38, 38, 38,	/* 70 - 79 */
	38, 38, 38, 38, 38, 39, 39, 39, 39, 39,	/* 80 - 89 */
	39, 39, 39, 39, 39, 40, 40, 40, 40, 40	/* 90 - 99 */
};

/* Calculates a relative dB value from a ratio of linear
 * (i.e. not dB) signal levels.
 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
int
il3945_calc_db_from_ratio(int sig_ratio)
{
	/* 1000:1 or higher just report as 60 dB */
	if (sig_ratio >= 1000)
		return 60;

	/* 100:1 or higher, divide by 10 and use table,
	 * add 20 dB to make up for divide by 10 */
	if (sig_ratio >= 100)
		return 20 + (int)ratio2dB[sig_ratio / 10];

	/* We shouldn't see this */
	if (sig_ratio < 1)
		return 0;

	/* Use table for ratios 1:1 - 99:1 */
	return (int)ratio2dB[sig_ratio];
}

/**
 * il3945_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the il->handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void
il3945_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty = 0;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode).
*/ 1217 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; 1218 i = rxq->read; 1219 1220 /* calculate total frames need to be restock after handling RX */ 1221 total_empty = r - rxq->write_actual; 1222 if (total_empty < 0) 1223 total_empty += RX_QUEUE_SIZE; 1224 1225 if (total_empty > (RX_QUEUE_SIZE / 2)) 1226 fill_rx = 1; 1227 /* Rx interrupt, but nothing sent from uCode */ 1228 if (i == r) 1229 D_RX("r = %d, i = %d\n", r, i); 1230 1231 while (i != r) { 1232 int len; 1233 1234 rxb = rxq->queue[i]; 1235 1236 /* If an RXB doesn't have a Rx queue slot associated with it, 1237 * then a bug has been introduced in the queue refilling 1238 * routines -- catch it here */ 1239 BUG_ON(rxb == NULL); 1240 1241 rxq->queue[i] = NULL; 1242 1243 pci_unmap_page(il->pci_dev, rxb->page_dma, 1244 PAGE_SIZE << il->hw_params.rx_page_order, 1245 PCI_DMA_FROMDEVICE); 1246 pkt = rxb_addr(rxb); 1247 1248 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; 1249 len += sizeof(u32); /* account for status word */ 1250 1251 reclaim = il_need_reclaim(il, pkt); 1252 1253 /* Based on type of command response or notification, 1254 * handle those that need handling via function in 1255 * handlers table. See il3945_setup_handlers() */ 1256 if (il->handlers[pkt->hdr.cmd]) { 1257 D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i, 1258 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1259 il->isr_stats.handlers[pkt->hdr.cmd]++; 1260 il->handlers[pkt->hdr.cmd] (il, rxb); 1261 } else { 1262 /* No handling needed */ 1263 D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r, 1264 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1265 } 1266 1267 /* 1268 * XXX: After here, we should always check rxb->page 1269 * against NULL before touching it or its virtual 1270 * memory (pkt). Because some handler might have 1271 * already taken or freed the pages. 1272 */ 1273 1274 if (reclaim) { 1275 /* Invoke any callbacks, transfer the buffer to caller, 1276 * and fire off the (possibly) blocking il_send_cmd() 1277 * as we reclaim the driver command queue */ 1278 if (rxb->page) 1279 il_tx_cmd_complete(il, rxb); 1280 else 1281 IL_WARN("Claim null rxb?\n"); 1282 } 1283 1284 /* Reuse the page if possible. For notification packets and 1285 * SKBs that fail to Rx correctly, add them back into the 1286 * rx_free list for reuse later. */ 1287 spin_lock_irqsave(&rxq->lock, flags); 1288 if (rxb->page != NULL) { 1289 rxb->page_dma = 1290 pci_map_page(il->pci_dev, rxb->page, 0, 1291 PAGE_SIZE << il->hw_params. 1292 rx_page_order, PCI_DMA_FROMDEVICE); 1293 if (unlikely(pci_dma_mapping_error(il->pci_dev, 1294 rxb->page_dma))) { 1295 __il_free_pages(il, rxb->page); 1296 rxb->page = NULL; 1297 list_add_tail(&rxb->list, &rxq->rx_used); 1298 } else { 1299 list_add_tail(&rxb->list, &rxq->rx_free); 1300 rxq->free_count++; 1301 } 1302 } else 1303 list_add_tail(&rxb->list, &rxq->rx_used); 1304 1305 spin_unlock_irqrestore(&rxq->lock, flags); 1306 1307 i = (i + 1) & RX_QUEUE_MASK; 1308 /* If there are a lot of unused frames, 1309 * restock the Rx queue so ucode won't assert. 
*/ 1310 if (fill_rx) { 1311 count++; 1312 if (count >= 8) { 1313 rxq->read = i; 1314 il3945_rx_replenish_now(il); 1315 count = 0; 1316 } 1317 } 1318 } 1319 1320 /* Backtrack one entry */ 1321 rxq->read = i; 1322 if (fill_rx) 1323 il3945_rx_replenish_now(il); 1324 else 1325 il3945_rx_queue_restock(il); 1326 } 1327 1328 /* call this function to flush any scheduled tasklet */ 1329 static inline void 1330 il3945_synchronize_irq(struct il_priv *il) 1331 { 1332 /* wait to make sure we flush pending tasklet */ 1333 synchronize_irq(il->pci_dev->irq); 1334 tasklet_kill(&il->irq_tasklet); 1335 } 1336 1337 static const char * 1338 il3945_desc_lookup(int i) 1339 { 1340 switch (i) { 1341 case 1: 1342 return "FAIL"; 1343 case 2: 1344 return "BAD_PARAM"; 1345 case 3: 1346 return "BAD_CHECKSUM"; 1347 case 4: 1348 return "NMI_INTERRUPT"; 1349 case 5: 1350 return "SYSASSERT"; 1351 case 6: 1352 return "FATAL_ERROR"; 1353 } 1354 1355 return "UNKNOWN"; 1356 } 1357 1358 #define ERROR_START_OFFSET (1 * sizeof(u32)) 1359 #define ERROR_ELEM_SIZE (7 * sizeof(u32)) 1360 1361 void 1362 il3945_dump_nic_error_log(struct il_priv *il) 1363 { 1364 u32 i; 1365 u32 desc, time, count, base, data1; 1366 u32 blink1, blink2, ilink1, ilink2; 1367 1368 base = le32_to_cpu(il->card_alive.error_event_table_ptr); 1369 1370 if (!il3945_hw_valid_rtc_data_addr(base)) { 1371 IL_ERR("Not valid error log pointer 0x%08X\n", base); 1372 return; 1373 } 1374 1375 count = il_read_targ_mem(il, base); 1376 1377 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 1378 IL_ERR("Start IWL Error Log Dump:\n"); 1379 IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count); 1380 } 1381 1382 IL_ERR("Desc Time asrtPC blink2 " 1383 "ilink1 nmiPC Line\n"); 1384 for (i = ERROR_START_OFFSET; 1385 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; 1386 i += ERROR_ELEM_SIZE) { 1387 desc = il_read_targ_mem(il, base + i); 1388 time = il_read_targ_mem(il, base + i + 1 * sizeof(u32)); 1389 blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32)); 1390 blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32)); 1391 ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32)); 1392 ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32)); 1393 data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32)); 1394 1395 IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1396 il3945_desc_lookup(desc), desc, time, blink1, blink2, 1397 ilink1, ilink2, data1); 1398 } 1399 } 1400 1401 static void 1402 il3945_irq_tasklet(struct il_priv *il) 1403 { 1404 u32 inta, handled = 0; 1405 u32 inta_fh; 1406 unsigned long flags; 1407 #ifdef CONFIG_IWLEGACY_DEBUG 1408 u32 inta_mask; 1409 #endif 1410 1411 spin_lock_irqsave(&il->lock, flags); 1412 1413 /* Ack/clear/reset pending uCode interrupts. 1414 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1415 * and will clear only when CSR_FH_INT_STATUS gets cleared. */ 1416 inta = _il_rd(il, CSR_INT); 1417 _il_wr(il, CSR_INT, inta); 1418 1419 /* Ack/clear/reset pending flow-handler (DMA) interrupts. 1420 * Any new interrupts that happen after this, either while we're 1421 * in this tasklet, or later, will show up in next ISR/tasklet. 
*/ 1422 inta_fh = _il_rd(il, CSR_FH_INT_STATUS); 1423 _il_wr(il, CSR_FH_INT_STATUS, inta_fh); 1424 1425 #ifdef CONFIG_IWLEGACY_DEBUG 1426 if (il_get_debug_level(il) & IL_DL_ISR) { 1427 /* just for debug */ 1428 inta_mask = _il_rd(il, CSR_INT_MASK); 1429 D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, 1430 inta_mask, inta_fh); 1431 } 1432 #endif 1433 1434 spin_unlock_irqrestore(&il->lock, flags); 1435 1436 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1437 * atomic, make sure that inta covers all the interrupts that 1438 * we've discovered, even if FH interrupt came in just after 1439 * reading CSR_INT. */ 1440 if (inta_fh & CSR39_FH_INT_RX_MASK) 1441 inta |= CSR_INT_BIT_FH_RX; 1442 if (inta_fh & CSR39_FH_INT_TX_MASK) 1443 inta |= CSR_INT_BIT_FH_TX; 1444 1445 /* Now service all interrupt bits discovered above. */ 1446 if (inta & CSR_INT_BIT_HW_ERR) { 1447 IL_ERR("Hardware error detected. Restarting.\n"); 1448 1449 /* Tell the device to stop sending interrupts */ 1450 il_disable_interrupts(il); 1451 1452 il->isr_stats.hw++; 1453 il_irq_handle_error(il); 1454 1455 handled |= CSR_INT_BIT_HW_ERR; 1456 1457 return; 1458 } 1459 #ifdef CONFIG_IWLEGACY_DEBUG 1460 if (il_get_debug_level(il) & (IL_DL_ISR)) { 1461 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1462 if (inta & CSR_INT_BIT_SCD) { 1463 D_ISR("Scheduler finished to transmit " 1464 "the frame/frames.\n"); 1465 il->isr_stats.sch++; 1466 } 1467 1468 /* Alive notification via Rx interrupt will do the real work */ 1469 if (inta & CSR_INT_BIT_ALIVE) { 1470 D_ISR("Alive interrupt\n"); 1471 il->isr_stats.alive++; 1472 } 1473 } 1474 #endif 1475 /* Safely ignore these bits for debug checks below */ 1476 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1477 1478 /* Error detected by uCode */ 1479 if (inta & CSR_INT_BIT_SW_ERR) { 1480 IL_ERR("Microcode SW error detected. 
" "Restarting 0x%X.\n", 1481 inta); 1482 il->isr_stats.sw++; 1483 il_irq_handle_error(il); 1484 handled |= CSR_INT_BIT_SW_ERR; 1485 } 1486 1487 /* uCode wakes up after power-down sleep */ 1488 if (inta & CSR_INT_BIT_WAKEUP) { 1489 D_ISR("Wakeup interrupt\n"); 1490 il_rx_queue_update_write_ptr(il, &il->rxq); 1491 1492 spin_lock_irqsave(&il->lock, flags); 1493 il_txq_update_write_ptr(il, &il->txq[0]); 1494 il_txq_update_write_ptr(il, &il->txq[1]); 1495 il_txq_update_write_ptr(il, &il->txq[2]); 1496 il_txq_update_write_ptr(il, &il->txq[3]); 1497 il_txq_update_write_ptr(il, &il->txq[4]); 1498 spin_unlock_irqrestore(&il->lock, flags); 1499 1500 il->isr_stats.wakeup++; 1501 handled |= CSR_INT_BIT_WAKEUP; 1502 } 1503 1504 /* All uCode command responses, including Tx command responses, 1505 * Rx "responses" (frame-received notification), and other 1506 * notifications from uCode come through here*/ 1507 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1508 il3945_rx_handle(il); 1509 il->isr_stats.rx++; 1510 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1511 } 1512 1513 if (inta & CSR_INT_BIT_FH_TX) { 1514 D_ISR("Tx interrupt\n"); 1515 il->isr_stats.tx++; 1516 1517 _il_wr(il, CSR_FH_INT_STATUS, (1 << 6)); 1518 il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0); 1519 handled |= CSR_INT_BIT_FH_TX; 1520 } 1521 1522 if (inta & ~handled) { 1523 IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled); 1524 il->isr_stats.unhandled++; 1525 } 1526 1527 if (inta & ~il->inta_mask) { 1528 IL_WARN("Disabled INTA bits 0x%08x were pending\n", 1529 inta & ~il->inta_mask); 1530 IL_WARN(" with inta_fh = 0x%08x\n", inta_fh); 1531 } 1532 1533 /* Re-enable all interrupts */ 1534 /* only Re-enable if disabled by irq */ 1535 if (test_bit(S_INT_ENABLED, &il->status)) 1536 il_enable_interrupts(il); 1537 1538 #ifdef CONFIG_IWLEGACY_DEBUG 1539 if (il_get_debug_level(il) & (IL_DL_ISR)) { 1540 inta = _il_rd(il, CSR_INT); 1541 inta_mask = _il_rd(il, CSR_INT_MASK); 1542 inta_fh = _il_rd(il, CSR_FH_INT_STATUS); 1543 D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " 1544 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1545 } 1546 #endif 1547 } 1548 1549 static int 1550 il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band, 1551 u8 is_active, u8 n_probes, 1552 struct il3945_scan_channel *scan_ch, 1553 struct ieee80211_vif *vif) 1554 { 1555 struct ieee80211_channel *chan; 1556 const struct ieee80211_supported_band *sband; 1557 const struct il_channel_info *ch_info; 1558 u16 passive_dwell = 0; 1559 u16 active_dwell = 0; 1560 int added, i; 1561 1562 sband = il_get_hw_mode(il, band); 1563 if (!sband) 1564 return 0; 1565 1566 active_dwell = il_get_active_dwell_time(il, band, n_probes); 1567 passive_dwell = il_get_passive_dwell_time(il, band, vif); 1568 1569 if (passive_dwell <= active_dwell) 1570 passive_dwell = active_dwell + 1; 1571 1572 for (i = 0, added = 0; i < il->scan_request->n_channels; i++) { 1573 chan = il->scan_request->channels[i]; 1574 1575 if (chan->band != band) 1576 continue; 1577 1578 scan_ch->channel = chan->hw_value; 1579 1580 ch_info = il_get_channel_info(il, band, scan_ch->channel); 1581 if (!il_is_channel_valid(ch_info)) { 1582 D_SCAN("Channel %d is INVALID for this band.\n", 1583 scan_ch->channel); 1584 continue; 1585 } 1586 1587 scan_ch->active_dwell = cpu_to_le16(active_dwell); 1588 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 1589 /* If passive , set up for auto-switch 1590 * and use long active_dwell time. 
		 */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_NO_IR)) {
			scan_ch->type = 0;	/* passive */
			if (IL_UCODE_API(il->ucode_ver) == 1)
				scan_ch->active_dwell =
				    cpu_to_le16(passive_dwell - 1);
		} else {
			scan_ch->type = 1;	/* active */
		}

		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes get sent right away),
		 * or for passive channels (probes get sent only after
		 * hearing clear Rx packet).*/
		if (IL_UCODE_API(il->ucode_ver) >= 2) {
			if (n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		} else {
			/* uCode v1 does not allow setting direct probe bits on
			 * passive channel. */
			if ((scan_ch->type & 1) && n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		}

		/* Set txpower levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/*scan_pwr_info->tpc.tx_gain; */
		if (band == NL80211_BAND_5GHZ)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level:
			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
			 */
		}

		D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
		       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->type & 1) ? active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}

static void
il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
{
	int i;

	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
		rates[i].bitrate = il3945_rates[i].ieee * 5;
		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
		rates[i].hw_value_short = i;
		rates[i].flags = 0;
		if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
			/*
			 * If CCK != 1M then set short preamble rate flag.
			 */
			rates[i].flags |=
			    (il3945_rates[i].plcp ==
			     10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
		}
	}
}

/******************************************************************************
 *
 * uCode download functions
 *
 ******************************************************************************/

static void
il3945_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}

/**
 * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
 *   looking at all data.
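 *   Returns 0 when the entire image matches, -EIO otherwise (it stops
 *   reporting after 20 mismatched words).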
1684 */ 1685 static int 1686 il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len) 1687 { 1688 u32 val; 1689 u32 save_len = len; 1690 int rc = 0; 1691 u32 errcnt; 1692 1693 D_INFO("ucode inst image size is %u\n", len); 1694 1695 il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND); 1696 1697 errcnt = 0; 1698 for (; len > 0; len -= sizeof(u32), image++) { 1699 /* read data comes through single port, auto-incr addr */ 1700 /* NOTE: Use the debugless read so we don't flood kernel log 1701 * if IL_DL_IO is set */ 1702 val = _il_rd(il, HBUS_TARG_MEM_RDAT); 1703 if (val != le32_to_cpu(*image)) { 1704 IL_ERR("uCode INST section is invalid at " 1705 "offset 0x%x, is 0x%x, s/b 0x%x\n", 1706 save_len - len, val, le32_to_cpu(*image)); 1707 rc = -EIO; 1708 errcnt++; 1709 if (errcnt >= 20) 1710 break; 1711 } 1712 } 1713 1714 if (!errcnt) 1715 D_INFO("ucode image in INSTRUCTION memory is good\n"); 1716 1717 return rc; 1718 } 1719 1720 /** 1721 * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host, 1722 * using sample data 100 bytes apart. If these sample points are good, 1723 * it's a pretty good bet that everything between them is good, too. 1724 */ 1725 static int 1726 il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len) 1727 { 1728 u32 val; 1729 int rc = 0; 1730 u32 errcnt = 0; 1731 u32 i; 1732 1733 D_INFO("ucode inst image size is %u\n", len); 1734 1735 for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) { 1736 /* read data comes through single port, auto-incr addr */ 1737 /* NOTE: Use the debugless read so we don't flood kernel log 1738 * if IL_DL_IO is set */ 1739 il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND); 1740 val = _il_rd(il, HBUS_TARG_MEM_RDAT); 1741 if (val != le32_to_cpu(*image)) { 1742 #if 0 /* Enable this if you want to see details */ 1743 IL_ERR("uCode INST section is invalid at " 1744 "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val, 1745 *image); 1746 #endif 1747 rc = -EIO; 1748 errcnt++; 1749 if (errcnt >= 3) 1750 break; 1751 } 1752 } 1753 1754 return rc; 1755 } 1756 1757 /** 1758 * il3945_verify_ucode - determine which instruction image is in SRAM, 1759 * and verify its contents 1760 */ 1761 static int 1762 il3945_verify_ucode(struct il_priv *il) 1763 { 1764 __le32 *image; 1765 u32 len; 1766 int rc = 0; 1767 1768 /* Try bootstrap */ 1769 image = (__le32 *) il->ucode_boot.v_addr; 1770 len = il->ucode_boot.len; 1771 rc = il3945_verify_inst_sparse(il, image, len); 1772 if (rc == 0) { 1773 D_INFO("Bootstrap uCode is good in inst SRAM\n"); 1774 return 0; 1775 } 1776 1777 /* Try initialize */ 1778 image = (__le32 *) il->ucode_init.v_addr; 1779 len = il->ucode_init.len; 1780 rc = il3945_verify_inst_sparse(il, image, len); 1781 if (rc == 0) { 1782 D_INFO("Initialize uCode is good in inst SRAM\n"); 1783 return 0; 1784 } 1785 1786 /* Try runtime/protocol */ 1787 image = (__le32 *) il->ucode_code.v_addr; 1788 len = il->ucode_code.len; 1789 rc = il3945_verify_inst_sparse(il, image, len); 1790 if (rc == 0) { 1791 D_INFO("Runtime uCode is good in inst SRAM\n"); 1792 return 0; 1793 } 1794 1795 IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); 1796 1797 /* Since nothing seems to match, show first several data entries in 1798 * instruction SRAM, so maybe visual inspection will give a clue. 1799 * Selection of bootstrap image (vs. other images) is arbitrary. 
*/ 1800 image = (__le32 *) il->ucode_boot.v_addr; 1801 len = il->ucode_boot.len; 1802 rc = il3945_verify_inst_full(il, image, len); 1803 1804 return rc; 1805 } 1806 1807 static void 1808 il3945_nic_start(struct il_priv *il) 1809 { 1810 /* Remove all resets to allow NIC to operate */ 1811 _il_wr(il, CSR_RESET, 0); 1812 } 1813 1814 #define IL3945_UCODE_GET(item) \ 1815 static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\ 1816 { \ 1817 return le32_to_cpu(ucode->v1.item); \ 1818 } 1819 1820 static u32 1821 il3945_ucode_get_header_size(u32 api_ver) 1822 { 1823 return 24; 1824 } 1825 1826 static u8 * 1827 il3945_ucode_get_data(const struct il_ucode_header *ucode) 1828 { 1829 return (u8 *) ucode->v1.data; 1830 } 1831 1832 IL3945_UCODE_GET(inst_size); 1833 IL3945_UCODE_GET(data_size); 1834 IL3945_UCODE_GET(init_size); 1835 IL3945_UCODE_GET(init_data_size); 1836 IL3945_UCODE_GET(boot_size); 1837 1838 /** 1839 * il3945_read_ucode - Read uCode images from disk file. 1840 * 1841 * Copy into buffers for card to fetch via bus-mastering 1842 */ 1843 static int 1844 il3945_read_ucode(struct il_priv *il) 1845 { 1846 const struct il_ucode_header *ucode; 1847 int ret = -EINVAL, idx; 1848 const struct firmware *ucode_raw; 1849 /* firmware file name contains uCode/driver compatibility version */ 1850 const char *name_pre = il->cfg->fw_name_pre; 1851 const unsigned int api_max = il->cfg->ucode_api_max; 1852 const unsigned int api_min = il->cfg->ucode_api_min; 1853 char buf[25]; 1854 u8 *src; 1855 size_t len; 1856 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; 1857 1858 /* Ask kernel firmware_class module to get the boot firmware off disk. 1859 * request_firmware() is synchronous, file is in memory on return. */ 1860 for (idx = api_max; idx >= api_min; idx--) { 1861 sprintf(buf, "%s%u%s", name_pre, idx, ".ucode"); 1862 ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev); 1863 if (ret < 0) { 1864 IL_ERR("%s firmware file req failed: %d\n", buf, ret); 1865 if (ret == -ENOENT) 1866 continue; 1867 else 1868 goto error; 1869 } else { 1870 if (idx < api_max) 1871 IL_ERR("Loaded firmware %s, " 1872 "which is deprecated. " 1873 " Please use API v%u instead.\n", buf, 1874 api_max); 1875 D_INFO("Got firmware '%s' file " 1876 "(%zd bytes) from disk\n", buf, ucode_raw->size); 1877 break; 1878 } 1879 } 1880 1881 if (ret < 0) 1882 goto error; 1883 1884 /* Make sure that we got at least our header! */ 1885 if (ucode_raw->size < il3945_ucode_get_header_size(1)) { 1886 IL_ERR("File size way too small!\n"); 1887 ret = -EINVAL; 1888 goto err_release; 1889 } 1890 1891 /* Data from ucode file: header followed by uCode images */ 1892 ucode = (struct il_ucode_header *)ucode_raw->data; 1893 1894 il->ucode_ver = le32_to_cpu(ucode->ver); 1895 api_ver = IL_UCODE_API(il->ucode_ver); 1896 inst_size = il3945_ucode_get_inst_size(ucode); 1897 data_size = il3945_ucode_get_data_size(ucode); 1898 init_size = il3945_ucode_get_init_size(ucode); 1899 init_data_size = il3945_ucode_get_init_data_size(ucode); 1900 boot_size = il3945_ucode_get_boot_size(ucode); 1901 src = il3945_ucode_get_data(ucode); 1902 1903 /* api_ver should match the api version forming part of the 1904 * firmware filename ... but we don't check for that and only rely 1905 * on the API version read from firmware header from here on forward */ 1906 1907 if (api_ver < api_min || api_ver > api_max) { 1908 IL_ERR("Driver unable to support your firmware API. 
" 1909 "Driver supports v%u, firmware is v%u.\n", api_max, 1910 api_ver); 1911 il->ucode_ver = 0; 1912 ret = -EINVAL; 1913 goto err_release; 1914 } 1915 if (api_ver != api_max) 1916 IL_ERR("Firmware has old API version. Expected %u, " 1917 "got %u. New firmware can be obtained " 1918 "from http://www.intellinuxwireless.org.\n", api_max, 1919 api_ver); 1920 1921 IL_INFO("loaded firmware version %u.%u.%u.%u\n", 1922 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), 1923 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); 1924 1925 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), 1926 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), 1927 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), 1928 IL_UCODE_SERIAL(il->ucode_ver)); 1929 1930 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); 1931 D_INFO("f/w package hdr runtime inst size = %u\n", inst_size); 1932 D_INFO("f/w package hdr runtime data size = %u\n", data_size); 1933 D_INFO("f/w package hdr init inst size = %u\n", init_size); 1934 D_INFO("f/w package hdr init data size = %u\n", init_data_size); 1935 D_INFO("f/w package hdr boot inst size = %u\n", boot_size); 1936 1937 /* Verify size of file vs. image size info in file's header */ 1938 if (ucode_raw->size != 1939 il3945_ucode_get_header_size(api_ver) + inst_size + data_size + 1940 init_size + init_data_size + boot_size) { 1941 1942 D_INFO("uCode file size %zd does not match expected size\n", 1943 ucode_raw->size); 1944 ret = -EINVAL; 1945 goto err_release; 1946 } 1947 1948 /* Verify that uCode images will fit in card's SRAM */ 1949 if (inst_size > IL39_MAX_INST_SIZE) { 1950 D_INFO("uCode instr len %d too large to fit in\n", inst_size); 1951 ret = -EINVAL; 1952 goto err_release; 1953 } 1954 1955 if (data_size > IL39_MAX_DATA_SIZE) { 1956 D_INFO("uCode data len %d too large to fit in\n", data_size); 1957 ret = -EINVAL; 1958 goto err_release; 1959 } 1960 if (init_size > IL39_MAX_INST_SIZE) { 1961 D_INFO("uCode init instr len %d too large to fit in\n", 1962 init_size); 1963 ret = -EINVAL; 1964 goto err_release; 1965 } 1966 if (init_data_size > IL39_MAX_DATA_SIZE) { 1967 D_INFO("uCode init data len %d too large to fit in\n", 1968 init_data_size); 1969 ret = -EINVAL; 1970 goto err_release; 1971 } 1972 if (boot_size > IL39_MAX_BSM_SIZE) { 1973 D_INFO("uCode boot instr len %d too large to fit in\n", 1974 boot_size); 1975 ret = -EINVAL; 1976 goto err_release; 1977 } 1978 1979 /* Allocate ucode buffers for card's bus-master loading ... 
*/ 1980 1981 /* Runtime instructions and 2 copies of data: 1982 * 1) unmodified from disk 1983 * 2) backup cache for save/restore during power-downs */ 1984 il->ucode_code.len = inst_size; 1985 il_alloc_fw_desc(il->pci_dev, &il->ucode_code); 1986 1987 il->ucode_data.len = data_size; 1988 il_alloc_fw_desc(il->pci_dev, &il->ucode_data); 1989 1990 il->ucode_data_backup.len = data_size; 1991 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); 1992 1993 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || 1994 !il->ucode_data_backup.v_addr) 1995 goto err_pci_alloc; 1996 1997 /* Initialization instructions and data */ 1998 if (init_size && init_data_size) { 1999 il->ucode_init.len = init_size; 2000 il_alloc_fw_desc(il->pci_dev, &il->ucode_init); 2001 2002 il->ucode_init_data.len = init_data_size; 2003 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); 2004 2005 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) 2006 goto err_pci_alloc; 2007 } 2008 2009 /* Bootstrap (instructions only, no data) */ 2010 if (boot_size) { 2011 il->ucode_boot.len = boot_size; 2012 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); 2013 2014 if (!il->ucode_boot.v_addr) 2015 goto err_pci_alloc; 2016 } 2017 2018 /* Copy images into buffers for card's bus-master reads ... */ 2019 2020 /* Runtime instructions (first block of data in file) */ 2021 len = inst_size; 2022 D_INFO("Copying (but not loading) uCode instr len %zd\n", len); 2023 memcpy(il->ucode_code.v_addr, src, len); 2024 src += len; 2025 2026 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", 2027 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); 2028 2029 /* Runtime data (2nd block) 2030 * NOTE: Copy into backup buffer will be done in il3945_up() */ 2031 len = data_size; 2032 D_INFO("Copying (but not loading) uCode data len %zd\n", len); 2033 memcpy(il->ucode_data.v_addr, src, len); 2034 memcpy(il->ucode_data_backup.v_addr, src, len); 2035 src += len; 2036 2037 /* Initialization instructions (3rd block) */ 2038 if (init_size) { 2039 len = init_size; 2040 D_INFO("Copying (but not loading) init instr len %zd\n", len); 2041 memcpy(il->ucode_init.v_addr, src, len); 2042 src += len; 2043 } 2044 2045 /* Initialization data (4th block) */ 2046 if (init_data_size) { 2047 len = init_data_size; 2048 D_INFO("Copying (but not loading) init data len %zd\n", len); 2049 memcpy(il->ucode_init_data.v_addr, src, len); 2050 src += len; 2051 } 2052 2053 /* Bootstrap instructions (5th block) */ 2054 len = boot_size; 2055 D_INFO("Copying (but not loading) boot instr len %zd\n", len); 2056 memcpy(il->ucode_boot.v_addr, src, len); 2057 2058 /* We have our copies now, allow OS release its copies */ 2059 release_firmware(ucode_raw); 2060 return 0; 2061 2062 err_pci_alloc: 2063 IL_ERR("failed to allocate pci memory\n"); 2064 ret = -ENOMEM; 2065 il3945_dealloc_ucode_pci(il); 2066 2067 err_release: 2068 release_firmware(ucode_raw); 2069 2070 error: 2071 return ret; 2072 } 2073 2074 /** 2075 * il3945_set_ucode_ptrs - Set uCode address location 2076 * 2077 * Tell initialization uCode where to find runtime uCode. 2078 * 2079 * BSM registers initially contain pointers to initialization uCode. 2080 * We need to replace them to load runtime uCode inst and data, 2081 * and to save runtime data when powering down. 
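 *
 * The routine below only writes the runtime image's DMA addresses and byte
 * counts into the BSM DRAM pointer registers; setting BSM_DRAM_INST_LOAD in
 * the instruction byte count is what signals the uCode that the new
 * pointer/size info is in place.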
2082 */ 2083 static int 2084 il3945_set_ucode_ptrs(struct il_priv *il) 2085 { 2086 dma_addr_t pinst; 2087 dma_addr_t pdata; 2088 2089 /* bits 31:0 for 3945 */ 2090 pinst = il->ucode_code.p_addr; 2091 pdata = il->ucode_data_backup.p_addr; 2092 2093 /* Tell bootstrap uCode where to find image to load */ 2094 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); 2095 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); 2096 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len); 2097 2098 /* Inst byte count must be last to set up, bit 31 signals uCode 2099 * that all new ptr/size info is in place */ 2100 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, 2101 il->ucode_code.len | BSM_DRAM_INST_LOAD); 2102 2103 D_INFO("Runtime uCode pointers are set.\n"); 2104 2105 return 0; 2106 } 2107 2108 /** 2109 * il3945_init_alive_start - Called after N_ALIVE notification received 2110 * 2111 * Called after N_ALIVE notification received from "initialize" uCode. 2112 * 2113 * Tell "initialize" uCode to go ahead and load the runtime uCode. 2114 */ 2115 static void 2116 il3945_init_alive_start(struct il_priv *il) 2117 { 2118 /* Check alive response for "valid" sign from uCode */ 2119 if (il->card_alive_init.is_valid != UCODE_VALID_OK) { 2120 /* We had an error bringing up the hardware, so take it 2121 * all the way back down so we can try again */ 2122 D_INFO("Initialize Alive failed.\n"); 2123 goto restart; 2124 } 2125 2126 /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 2127 * This is a paranoid check, because we would not have gotten the 2128 * "initialize" alive if code weren't properly loaded. */ 2129 if (il3945_verify_ucode(il)) { 2130 /* Runtime instruction load was bad; 2131 * take it all the way back down so we can try again */ 2132 D_INFO("Bad \"initialize\" uCode load.\n"); 2133 goto restart; 2134 } 2135 2136 /* Send pointers to protocol/runtime uCode image ... init code will 2137 * load and launch runtime uCode, which will send us another "Alive" 2138 * notification. */ 2139 D_INFO("Initialization Alive received.\n"); 2140 if (il3945_set_ucode_ptrs(il)) { 2141 /* Runtime instruction load won't happen; 2142 * take it all the way back down so we can try again */ 2143 D_INFO("Couldn't set up uCode pointers.\n"); 2144 goto restart; 2145 } 2146 return; 2147 2148 restart: 2149 queue_work(il->workqueue, &il->restart); 2150 } 2151 2152 /** 2153 * il3945_alive_start - called after N_ALIVE notification received 2154 * from protocol/runtime uCode (initialization uCode's 2155 * Alive gets handled by il3945_init_alive_start()). 2156 */ 2157 static void 2158 il3945_alive_start(struct il_priv *il) 2159 { 2160 int thermal_spin = 0; 2161 u32 rfkill; 2162 2163 D_INFO("Runtime Alive received.\n"); 2164 2165 if (il->card_alive.is_valid != UCODE_VALID_OK) { 2166 /* We had an error bringing up the hardware, so take it 2167 * all the way back down so we can try again */ 2168 D_INFO("Alive failed.\n"); 2169 goto restart; 2170 } 2171 2172 /* Initialize uCode has loaded Runtime uCode ... verify inst image. 2173 * This is a paranoid check, because we would not have gotten the 2174 * "runtime" alive if code weren't properly loaded. 
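 * (il3945_verify_ucode() samples the image every 100 bytes, so this check
 * stays cheap; see il3945_verify_inst_sparse() above.)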
*/ 2175 if (il3945_verify_ucode(il)) { 2176 /* Runtime instruction load was bad; 2177 * take it all the way back down so we can try again */ 2178 D_INFO("Bad runtime uCode load.\n"); 2179 goto restart; 2180 } 2181 2182 rfkill = il_rd_prph(il, APMG_RFKILL_REG); 2183 D_INFO("RFKILL status: 0x%x\n", rfkill); 2184 2185 if (rfkill & 0x1) { 2186 clear_bit(S_RFKILL, &il->status); 2187 /* if RFKILL is not on, then wait for thermal 2188 * sensor in adapter to kick in */ 2189 while (il3945_hw_get_temperature(il) == 0) { 2190 thermal_spin++; 2191 udelay(10); 2192 } 2193 2194 if (thermal_spin) 2195 D_INFO("Thermal calibration took %dus\n", 2196 thermal_spin * 10); 2197 } else 2198 set_bit(S_RFKILL, &il->status); 2199 2200 /* After the ALIVE response, we can send commands to 3945 uCode */ 2201 set_bit(S_ALIVE, &il->status); 2202 2203 /* Enable watchdog to monitor the driver tx queues */ 2204 il_setup_watchdog(il); 2205 2206 if (il_is_rfkill(il)) 2207 return; 2208 2209 ieee80211_wake_queues(il->hw); 2210 2211 il->active_rate = RATES_MASK_3945; 2212 2213 il_power_update_mode(il, true); 2214 2215 if (il_is_associated(il)) { 2216 struct il3945_rxon_cmd *active_rxon = 2217 (struct il3945_rxon_cmd *)(&il->active); 2218 2219 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2220 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2221 } else { 2222 /* Initialize our rx_config data */ 2223 il_connection_init_rx_config(il); 2224 } 2225 2226 /* Configure Bluetooth device coexistence support */ 2227 il_send_bt_config(il); 2228 2229 set_bit(S_READY, &il->status); 2230 2231 /* Configure the adapter for unassociated operation */ 2232 il3945_commit_rxon(il); 2233 2234 il3945_reg_txpower_periodic(il); 2235 2236 D_INFO("ALIVE processing complete.\n"); 2237 wake_up(&il->wait_command_queue); 2238 2239 return; 2240 2241 restart: 2242 queue_work(il->workqueue, &il->restart); 2243 } 2244 2245 static void il3945_cancel_deferred_work(struct il_priv *il); 2246 2247 static void 2248 __il3945_down(struct il_priv *il) 2249 { 2250 unsigned long flags; 2251 int exit_pending; 2252 2253 D_INFO(DRV_NAME " is going down\n"); 2254 2255 il_scan_cancel_timeout(il, 200); 2256 2257 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status); 2258 2259 /* Stop TX queues watchdog. 
We need to have S_EXIT_PENDING bit set 2260 * to prevent rearm timer */ 2261 del_timer_sync(&il->watchdog); 2262 2263 /* Station information will now be cleared in device */ 2264 il_clear_ucode_stations(il); 2265 il_dealloc_bcast_stations(il); 2266 il_clear_driver_stations(il); 2267 2268 /* Unblock any waiting calls */ 2269 wake_up_all(&il->wait_command_queue); 2270 2271 /* Wipe out the EXIT_PENDING status bit if we are not actually 2272 * exiting the module */ 2273 if (!exit_pending) 2274 clear_bit(S_EXIT_PENDING, &il->status); 2275 2276 /* stop and reset the on-board processor */ 2277 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 2278 2279 /* tell the device to stop sending interrupts */ 2280 spin_lock_irqsave(&il->lock, flags); 2281 il_disable_interrupts(il); 2282 spin_unlock_irqrestore(&il->lock, flags); 2283 il3945_synchronize_irq(il); 2284 2285 if (il->mac80211_registered) 2286 ieee80211_stop_queues(il->hw); 2287 2288 /* If we have not previously called il3945_init() then 2289 * clear all bits but the RF Kill bits and return */ 2290 if (!il_is_init(il)) { 2291 il->status = 2292 test_bit(S_RFKILL, &il->status) << S_RFKILL | 2293 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED | 2294 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; 2295 goto exit; 2296 } 2297 2298 /* ...otherwise clear out all the status bits but the RF Kill 2299 * bit and continue taking the NIC down. */ 2300 il->status &= 2301 test_bit(S_RFKILL, &il->status) << S_RFKILL | 2302 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED | 2303 test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR | 2304 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; 2305 2306 /* 2307 * We disabled and synchronized interrupt, and priv->mutex is taken, so 2308 * here is the only thread which will program device registers, but 2309 * still have lockdep assertions, so we are taking reg_lock. 2310 */ 2311 spin_lock_irq(&il->reg_lock); 2312 /* FIXME: il_grab_nic_access if rfkill is off ? 
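 * Shutdown sequence below: stop TX/RX DMA, gate the busmaster DMA clock,
 * then put the device into low power state via _il_apm_stop().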
*/ 2313 2314 il3945_hw_txq_ctx_stop(il); 2315 il3945_hw_rxq_stop(il); 2316 /* Power-down device's busmaster DMA clocks */ 2317 _il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); 2318 udelay(5); 2319 /* Stop the device, and put it in low power state */ 2320 _il_apm_stop(il); 2321 2322 spin_unlock_irq(&il->reg_lock); 2323 2324 il3945_hw_txq_ctx_free(il); 2325 exit: 2326 memset(&il->card_alive, 0, sizeof(struct il_alive_resp)); 2327 2328 if (il->beacon_skb) 2329 dev_kfree_skb(il->beacon_skb); 2330 il->beacon_skb = NULL; 2331 2332 /* clear out any free frames */ 2333 il3945_clear_free_frames(il); 2334 } 2335 2336 static void 2337 il3945_down(struct il_priv *il) 2338 { 2339 mutex_lock(&il->mutex); 2340 __il3945_down(il); 2341 mutex_unlock(&il->mutex); 2342 2343 il3945_cancel_deferred_work(il); 2344 } 2345 2346 #define MAX_HW_RESTARTS 5 2347 2348 static int 2349 il3945_alloc_bcast_station(struct il_priv *il) 2350 { 2351 unsigned long flags; 2352 u8 sta_id; 2353 2354 spin_lock_irqsave(&il->sta_lock, flags); 2355 sta_id = il_prep_station(il, il_bcast_addr, false, NULL); 2356 if (sta_id == IL_INVALID_STATION) { 2357 IL_ERR("Unable to prepare broadcast station\n"); 2358 spin_unlock_irqrestore(&il->sta_lock, flags); 2359 2360 return -EINVAL; 2361 } 2362 2363 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE; 2364 il->stations[sta_id].used |= IL_STA_BCAST; 2365 spin_unlock_irqrestore(&il->sta_lock, flags); 2366 2367 return 0; 2368 } 2369 2370 static int 2371 __il3945_up(struct il_priv *il) 2372 { 2373 int rc, i; 2374 2375 rc = il3945_alloc_bcast_station(il); 2376 if (rc) 2377 return rc; 2378 2379 if (test_bit(S_EXIT_PENDING, &il->status)) { 2380 IL_WARN("Exit pending; will not bring the NIC up\n"); 2381 return -EIO; 2382 } 2383 2384 if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) { 2385 IL_ERR("ucode not available for device bring up\n"); 2386 return -EIO; 2387 } 2388 2389 /* If platform's RF_KILL switch is NOT set to KILL */ 2390 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2391 clear_bit(S_RFKILL, &il->status); 2392 else { 2393 set_bit(S_RFKILL, &il->status); 2394 return -ERFKILL; 2395 } 2396 2397 _il_wr(il, CSR_INT, 0xFFFFFFFF); 2398 2399 rc = il3945_hw_nic_init(il); 2400 if (rc) { 2401 IL_ERR("Unable to int nic\n"); 2402 return rc; 2403 } 2404 2405 /* make sure rfkill handshake bits are cleared */ 2406 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2407 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2408 2409 /* clear (again), then enable host interrupts */ 2410 _il_wr(il, CSR_INT, 0xFFFFFFFF); 2411 il_enable_interrupts(il); 2412 2413 /* really make sure rfkill handshake bits are cleared */ 2414 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2415 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2416 2417 /* Copy original ucode data image from disk into backup cache. 2418 * This will be used to initialize the on-board processor's 2419 * data SRAM for a clean start when the runtime program first loads. */ 2420 memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr, 2421 il->ucode_data.len); 2422 2423 /* We return success when we resume from suspend and rf_kill is on. 
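 * No uCode has been loaded in that case; the bootstrap load loop below is
 * simply skipped.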
*/ 2424 if (test_bit(S_RFKILL, &il->status)) 2425 return 0; 2426 2427 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2428 2429 /* load bootstrap state machine, 2430 * load bootstrap program into processor's memory, 2431 * prepare to load the "initialize" uCode */ 2432 rc = il->ops->load_ucode(il); 2433 2434 if (rc) { 2435 IL_ERR("Unable to set up bootstrap uCode: %d\n", rc); 2436 continue; 2437 } 2438 2439 /* start card; "initialize" will load runtime ucode */ 2440 il3945_nic_start(il); 2441 2442 D_INFO(DRV_NAME " is coming up\n"); 2443 2444 return 0; 2445 } 2446 2447 set_bit(S_EXIT_PENDING, &il->status); 2448 __il3945_down(il); 2449 clear_bit(S_EXIT_PENDING, &il->status); 2450 2451 /* tried to restart and config the device for as long as our 2452 * patience could withstand */ 2453 IL_ERR("Unable to initialize device after %d attempts.\n", i); 2454 return -EIO; 2455 } 2456 2457 /***************************************************************************** 2458 * 2459 * Workqueue callbacks 2460 * 2461 *****************************************************************************/ 2462 2463 static void 2464 il3945_bg_init_alive_start(struct work_struct *data) 2465 { 2466 struct il_priv *il = 2467 container_of(data, struct il_priv, init_alive_start.work); 2468 2469 mutex_lock(&il->mutex); 2470 if (test_bit(S_EXIT_PENDING, &il->status)) 2471 goto out; 2472 2473 il3945_init_alive_start(il); 2474 out: 2475 mutex_unlock(&il->mutex); 2476 } 2477 2478 static void 2479 il3945_bg_alive_start(struct work_struct *data) 2480 { 2481 struct il_priv *il = 2482 container_of(data, struct il_priv, alive_start.work); 2483 2484 mutex_lock(&il->mutex); 2485 if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL) 2486 goto out; 2487 2488 il3945_alive_start(il); 2489 out: 2490 mutex_unlock(&il->mutex); 2491 } 2492 2493 /* 2494 * 3945 cannot interrupt driver when hardware rf kill switch toggles; 2495 * driver must poll CSR_GP_CNTRL_REG register for change. This register 2496 * *is* readable even when device has been SW_RESET into low power mode 2497 * (e.g. during RF KILL). 2498 */ 2499 static void 2500 il3945_rfkill_poll(struct work_struct *data) 2501 { 2502 struct il_priv *il = 2503 container_of(data, struct il_priv, _3945.rfkill_poll.work); 2504 bool old_rfkill = test_bit(S_RFKILL, &il->status); 2505 bool new_rfkill = 2506 !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 2507 2508 if (new_rfkill != old_rfkill) { 2509 if (new_rfkill) 2510 set_bit(S_RFKILL, &il->status); 2511 else 2512 clear_bit(S_RFKILL, &il->status); 2513 2514 wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill); 2515 2516 D_RF_KILL("RF_KILL bit toggled to %s.\n", 2517 new_rfkill ? "disable radio" : "enable radio"); 2518 } 2519 2520 /* Keep this running, even if radio now enabled. 
This will be 2521 * cancelled in mac_start() if system decides to start again */ 2522 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2523 round_jiffies_relative(2 * HZ)); 2524 2525 } 2526 2527 int 2528 il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif) 2529 { 2530 struct il_host_cmd cmd = { 2531 .id = C_SCAN, 2532 .len = sizeof(struct il3945_scan_cmd), 2533 .flags = CMD_SIZE_HUGE, 2534 }; 2535 struct il3945_scan_cmd *scan; 2536 u8 n_probes = 0; 2537 enum nl80211_band band; 2538 bool is_active = false; 2539 int ret; 2540 u16 len; 2541 2542 lockdep_assert_held(&il->mutex); 2543 2544 if (!il->scan_cmd) { 2545 il->scan_cmd = 2546 kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE, 2547 GFP_KERNEL); 2548 if (!il->scan_cmd) { 2549 D_SCAN("Fail to allocate scan memory\n"); 2550 return -ENOMEM; 2551 } 2552 } 2553 scan = il->scan_cmd; 2554 memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE); 2555 2556 scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH; 2557 scan->quiet_time = IL_ACTIVE_QUIET_TIME; 2558 2559 if (il_is_associated(il)) { 2560 u16 interval; 2561 u32 extra; 2562 u32 suspend_time = 100; 2563 u32 scan_suspend_time = 100; 2564 2565 D_INFO("Scanning while associated...\n"); 2566 2567 interval = vif->bss_conf.beacon_int; 2568 2569 scan->suspend_time = 0; 2570 scan->max_out_time = cpu_to_le32(200 * 1024); 2571 if (!interval) 2572 interval = suspend_time; 2573 /* 2574 * suspend time format: 2575 * 0-19: beacon interval in usec (time before exec.) 2576 * 20-23: 0 2577 * 24-31: number of beacons (suspend between channels) 2578 */ 2579 2580 extra = (suspend_time / interval) << 24; 2581 scan_suspend_time = 2582 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024)); 2583 2584 scan->suspend_time = cpu_to_le32(scan_suspend_time); 2585 D_SCAN("suspend_time 0x%X beacon interval %d\n", 2586 scan_suspend_time, interval); 2587 } 2588 2589 if (il->scan_request->n_ssids) { 2590 int i, p = 0; 2591 D_SCAN("Kicking off active scan\n"); 2592 for (i = 0; i < il->scan_request->n_ssids; i++) { 2593 /* always does wildcard anyway */ 2594 if (!il->scan_request->ssids[i].ssid_len) 2595 continue; 2596 scan->direct_scan[p].id = WLAN_EID_SSID; 2597 scan->direct_scan[p].len = 2598 il->scan_request->ssids[i].ssid_len; 2599 memcpy(scan->direct_scan[p].ssid, 2600 il->scan_request->ssids[i].ssid, 2601 il->scan_request->ssids[i].ssid_len); 2602 n_probes++; 2603 p++; 2604 } 2605 is_active = true; 2606 } else 2607 D_SCAN("Kicking off passive scan.\n"); 2608 2609 /* We don't build a direct scan probe request; the uCode will do 2610 * that based on the direct_mask added to each channel entry */ 2611 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 2612 scan->tx_cmd.sta_id = il->hw_params.bcast_id; 2613 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2614 2615 /* flags + rate selection */ 2616 2617 switch (il->scan_band) { 2618 case NL80211_BAND_2GHZ: 2619 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 2620 scan->tx_cmd.rate = RATE_1M_PLCP; 2621 band = NL80211_BAND_2GHZ; 2622 break; 2623 case NL80211_BAND_5GHZ: 2624 scan->tx_cmd.rate = RATE_6M_PLCP; 2625 band = NL80211_BAND_5GHZ; 2626 break; 2627 default: 2628 IL_WARN("Invalid scan band\n"); 2629 return -EIO; 2630 } 2631 2632 /* 2633 * If active scaning is requested but a certain channel is marked 2634 * passive, we can do active scanning if we detect transmissions. For 2635 * passive only scanning disable switching to active on any channel. 2636 */ 2637 scan->good_CRC_th = 2638 is_active ? 
IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER; 2639 2640 len = 2641 il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data, 2642 vif->addr, il->scan_request->ie, 2643 il->scan_request->ie_len, 2644 IL_MAX_SCAN_SIZE - sizeof(*scan)); 2645 scan->tx_cmd.len = cpu_to_le16(len); 2646 2647 /* select Rx antennas */ 2648 scan->flags |= il3945_get_antenna_flags(il); 2649 2650 scan->channel_count = 2651 il3945_get_channels_for_scan(il, band, is_active, n_probes, 2652 (void *)&scan->data[len], vif); 2653 if (scan->channel_count == 0) { 2654 D_SCAN("channel count %d\n", scan->channel_count); 2655 return -EIO; 2656 } 2657 2658 cmd.len += 2659 le16_to_cpu(scan->tx_cmd.len) + 2660 scan->channel_count * sizeof(struct il3945_scan_channel); 2661 cmd.data = scan; 2662 scan->len = cpu_to_le16(cmd.len); 2663 2664 set_bit(S_SCAN_HW, &il->status); 2665 ret = il_send_cmd_sync(il, &cmd); 2666 if (ret) 2667 clear_bit(S_SCAN_HW, &il->status); 2668 return ret; 2669 } 2670 2671 void 2672 il3945_post_scan(struct il_priv *il) 2673 { 2674 /* 2675 * Since setting the RXON may have been deferred while 2676 * performing the scan, fire one off if needed 2677 */ 2678 if (memcmp(&il->staging, &il->active, sizeof(il->staging))) 2679 il3945_commit_rxon(il); 2680 } 2681 2682 static void 2683 il3945_bg_restart(struct work_struct *data) 2684 { 2685 struct il_priv *il = container_of(data, struct il_priv, restart); 2686 2687 if (test_bit(S_EXIT_PENDING, &il->status)) 2688 return; 2689 2690 if (test_and_clear_bit(S_FW_ERROR, &il->status)) { 2691 mutex_lock(&il->mutex); 2692 il->is_open = 0; 2693 mutex_unlock(&il->mutex); 2694 il3945_down(il); 2695 ieee80211_restart_hw(il->hw); 2696 } else { 2697 il3945_down(il); 2698 2699 mutex_lock(&il->mutex); 2700 if (test_bit(S_EXIT_PENDING, &il->status)) { 2701 mutex_unlock(&il->mutex); 2702 return; 2703 } 2704 2705 __il3945_up(il); 2706 mutex_unlock(&il->mutex); 2707 } 2708 } 2709 2710 static void 2711 il3945_bg_rx_replenish(struct work_struct *data) 2712 { 2713 struct il_priv *il = container_of(data, struct il_priv, rx_replenish); 2714 2715 mutex_lock(&il->mutex); 2716 if (test_bit(S_EXIT_PENDING, &il->status)) 2717 goto out; 2718 2719 il3945_rx_replenish(il); 2720 out: 2721 mutex_unlock(&il->mutex); 2722 } 2723 2724 void 2725 il3945_post_associate(struct il_priv *il) 2726 { 2727 int rc = 0; 2728 struct ieee80211_conf *conf = NULL; 2729 2730 if (!il->vif || !il->is_open) 2731 return; 2732 2733 D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid, 2734 il->active.bssid_addr); 2735 2736 if (test_bit(S_EXIT_PENDING, &il->status)) 2737 return; 2738 2739 il_scan_cancel_timeout(il, 200); 2740 2741 conf = &il->hw->conf; 2742 2743 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2744 il3945_commit_rxon(il); 2745 2746 rc = il_send_rxon_timing(il); 2747 if (rc) 2748 IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n"); 2749 2750 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2751 2752 il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid); 2753 2754 D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid, 2755 il->vif->bss_conf.beacon_int); 2756 2757 if (il->vif->bss_conf.use_short_preamble) 2758 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 2759 else 2760 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 2761 2762 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) { 2763 if (il->vif->bss_conf.use_short_slot) 2764 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 2765 else 2766 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2767 } 2768 2769 il3945_commit_rxon(il); 
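	/* Mode-specific post-association work: start rate scaling toward the
	 * AP in station mode, or send the beacon command in IBSS. */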
2770 2771 switch (il->vif->type) { 2772 case NL80211_IFTYPE_STATION: 2773 il3945_rate_scale_init(il->hw, IL_AP_ID); 2774 break; 2775 case NL80211_IFTYPE_ADHOC: 2776 il3945_send_beacon_cmd(il); 2777 break; 2778 default: 2779 IL_ERR("%s Should not be called in %d mode\n", __func__, 2780 il->vif->type); 2781 break; 2782 } 2783 } 2784 2785 /***************************************************************************** 2786 * 2787 * mac80211 entry point functions 2788 * 2789 *****************************************************************************/ 2790 2791 #define UCODE_READY_TIMEOUT (2 * HZ) 2792 2793 static int 2794 il3945_mac_start(struct ieee80211_hw *hw) 2795 { 2796 struct il_priv *il = hw->priv; 2797 int ret; 2798 2799 /* we should be verifying the device is ready to be opened */ 2800 mutex_lock(&il->mutex); 2801 D_MAC80211("enter\n"); 2802 2803 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 2804 * ucode filename and max sizes are card-specific. */ 2805 2806 if (!il->ucode_code.len) { 2807 ret = il3945_read_ucode(il); 2808 if (ret) { 2809 IL_ERR("Could not read microcode: %d\n", ret); 2810 mutex_unlock(&il->mutex); 2811 goto out_release_irq; 2812 } 2813 } 2814 2815 ret = __il3945_up(il); 2816 2817 mutex_unlock(&il->mutex); 2818 2819 if (ret) 2820 goto out_release_irq; 2821 2822 D_INFO("Start UP work.\n"); 2823 2824 /* Wait for START_ALIVE from ucode. Otherwise callbacks from 2825 * mac80211 will not be run successfully. */ 2826 ret = wait_event_timeout(il->wait_command_queue, 2827 test_bit(S_READY, &il->status), 2828 UCODE_READY_TIMEOUT); 2829 if (!ret) { 2830 if (!test_bit(S_READY, &il->status)) { 2831 IL_ERR("Wait for START_ALIVE timeout after %dms.\n", 2832 jiffies_to_msecs(UCODE_READY_TIMEOUT)); 2833 ret = -ETIMEDOUT; 2834 goto out_release_irq; 2835 } 2836 } 2837 2838 /* ucode is running and will send rfkill notifications, 2839 * no need to poll the killswitch state anymore */ 2840 cancel_delayed_work(&il->_3945.rfkill_poll); 2841 2842 il->is_open = 1; 2843 D_MAC80211("leave\n"); 2844 return 0; 2845 2846 out_release_irq: 2847 il->is_open = 0; 2848 D_MAC80211("leave - failed\n"); 2849 return ret; 2850 } 2851 2852 static void 2853 il3945_mac_stop(struct ieee80211_hw *hw) 2854 { 2855 struct il_priv *il = hw->priv; 2856 2857 D_MAC80211("enter\n"); 2858 2859 if (!il->is_open) { 2860 D_MAC80211("leave - skip\n"); 2861 return; 2862 } 2863 2864 il->is_open = 0; 2865 2866 il3945_down(il); 2867 2868 flush_workqueue(il->workqueue); 2869 2870 /* start polling the killswitch state again */ 2871 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2872 round_jiffies_relative(2 * HZ)); 2873 2874 D_MAC80211("leave\n"); 2875 } 2876 2877 static void 2878 il3945_mac_tx(struct ieee80211_hw *hw, 2879 struct ieee80211_tx_control *control, 2880 struct sk_buff *skb) 2881 { 2882 struct il_priv *il = hw->priv; 2883 2884 D_MAC80211("enter\n"); 2885 2886 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2887 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2888 2889 if (il3945_tx_skb(il, control->sta, skb)) 2890 dev_kfree_skb_any(skb); 2891 2892 D_MAC80211("leave\n"); 2893 } 2894 2895 void 2896 il3945_config_ap(struct il_priv *il) 2897 { 2898 struct ieee80211_vif *vif = il->vif; 2899 int rc = 0; 2900 2901 if (test_bit(S_EXIT_PENDING, &il->status)) 2902 return; 2903 2904 /* The following should be done only at AP bring up */ 2905 if (!(il_is_associated(il))) { 2906 2907 /* RXON - unassoc (to set timing command) */ 2908 il->staging.filter_flags &= 
~RXON_FILTER_ASSOC_MSK; 2909 il3945_commit_rxon(il); 2910 2911 /* RXON Timing */ 2912 rc = il_send_rxon_timing(il); 2913 if (rc) 2914 IL_WARN("C_RXON_TIMING failed - " 2915 "Attempting to continue.\n"); 2916 2917 il->staging.assoc_id = 0; 2918 2919 if (vif->bss_conf.use_short_preamble) 2920 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 2921 else 2922 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 2923 2924 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) { 2925 if (vif->bss_conf.use_short_slot) 2926 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 2927 else 2928 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2929 } 2930 /* restore RXON assoc */ 2931 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2932 il3945_commit_rxon(il); 2933 } 2934 il3945_send_beacon_cmd(il); 2935 } 2936 2937 static int 2938 il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 2939 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 2940 struct ieee80211_key_conf *key) 2941 { 2942 struct il_priv *il = hw->priv; 2943 int ret = 0; 2944 u8 sta_id = IL_INVALID_STATION; 2945 u8 static_key; 2946 2947 D_MAC80211("enter\n"); 2948 2949 if (il3945_mod_params.sw_crypto) { 2950 D_MAC80211("leave - hwcrypto disabled\n"); 2951 return -EOPNOTSUPP; 2952 } 2953 2954 /* 2955 * To support IBSS RSN, don't program group keys in IBSS, the 2956 * hardware will then not attempt to decrypt the frames. 2957 */ 2958 if (vif->type == NL80211_IFTYPE_ADHOC && 2959 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 2960 D_MAC80211("leave - IBSS RSN\n"); 2961 return -EOPNOTSUPP; 2962 } 2963 2964 static_key = !il_is_associated(il); 2965 2966 if (!static_key) { 2967 sta_id = il_sta_id_or_broadcast(il, sta); 2968 if (sta_id == IL_INVALID_STATION) { 2969 D_MAC80211("leave - station not found\n"); 2970 return -EINVAL; 2971 } 2972 } 2973 2974 mutex_lock(&il->mutex); 2975 il_scan_cancel_timeout(il, 100); 2976 2977 switch (cmd) { 2978 case SET_KEY: 2979 if (static_key) 2980 ret = il3945_set_static_key(il, key); 2981 else 2982 ret = il3945_set_dynamic_key(il, key, sta_id); 2983 D_MAC80211("enable hwcrypto key\n"); 2984 break; 2985 case DISABLE_KEY: 2986 if (static_key) 2987 ret = il3945_remove_static_key(il); 2988 else 2989 ret = il3945_clear_sta_key_info(il, sta_id); 2990 D_MAC80211("disable hwcrypto key\n"); 2991 break; 2992 default: 2993 ret = -EINVAL; 2994 } 2995 2996 D_MAC80211("leave ret %d\n", ret); 2997 mutex_unlock(&il->mutex); 2998 2999 return ret; 3000 } 3001 3002 static int 3003 il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 3004 struct ieee80211_sta *sta) 3005 { 3006 struct il_priv *il = hw->priv; 3007 struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv; 3008 int ret; 3009 bool is_ap = vif->type == NL80211_IFTYPE_STATION; 3010 u8 sta_id; 3011 3012 mutex_lock(&il->mutex); 3013 D_INFO("station %pM\n", sta->addr); 3014 sta_priv->common.sta_id = IL_INVALID_STATION; 3015 3016 ret = il_add_station_common(il, sta->addr, is_ap, sta, &sta_id); 3017 if (ret) { 3018 IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret); 3019 /* Should we return success if return code is EEXIST ? 
*/ 3020 mutex_unlock(&il->mutex); 3021 return ret; 3022 } 3023 3024 sta_priv->common.sta_id = sta_id; 3025 3026 /* Initialize rate scaling */ 3027 D_INFO("Initializing rate scaling for station %pM\n", sta->addr); 3028 il3945_rs_rate_init(il, sta, sta_id); 3029 mutex_unlock(&il->mutex); 3030 3031 return 0; 3032 } 3033 3034 static void 3035 il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, 3036 unsigned int *total_flags, u64 multicast) 3037 { 3038 struct il_priv *il = hw->priv; 3039 __le32 filter_or = 0, filter_nand = 0; 3040 3041 #define CHK(test, flag) do { \ 3042 if (*total_flags & (test)) \ 3043 filter_or |= (flag); \ 3044 else \ 3045 filter_nand |= (flag); \ 3046 } while (0) 3047 3048 D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags, 3049 *total_flags); 3050 3051 CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK); 3052 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); 3053 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); 3054 3055 #undef CHK 3056 3057 mutex_lock(&il->mutex); 3058 3059 il->staging.filter_flags &= ~filter_nand; 3060 il->staging.filter_flags |= filter_or; 3061 3062 /* 3063 * Not committing directly because hardware can perform a scan, 3064 * but even if hw is ready, committing here breaks for some reason, 3065 * we'll eventually commit the filter flags change anyway. 3066 */ 3067 3068 mutex_unlock(&il->mutex); 3069 3070 /* 3071 * Receiving all multicast frames is always enabled by the 3072 * default flags setup in il_connection_init_rx_config() 3073 * since we currently do not support programming multicast 3074 * filters into the device. 3075 */ 3076 *total_flags &= 3077 FIF_OTHER_BSS | FIF_ALLMULTI | 3078 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 3079 } 3080 3081 /***************************************************************************** 3082 * 3083 * sysfs attributes 3084 * 3085 *****************************************************************************/ 3086 3087 #ifdef CONFIG_IWLEGACY_DEBUG 3088 3089 /* 3090 * The following adds a new attribute to the sysfs representation 3091 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) 3092 * used for controlling the debug level. 3093 * 3094 * See the level definitions in iwl for details. 3095 * 3096 * The debug_level being managed using sysfs below is a per device debug 3097 * level that is used instead of the global debug level if it (the per 3098 * device debug level) is set. 
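 *
 * Illustrative usage (the PCI address below is an example, not a fixed path,
 * and the mask value is arbitrary):
 *
 *   cat /sys/bus/pci/devices/0000:03:00.0/debug_level
 *   echo 0x47ffffff > /sys/bus/pci/devices/0000:03:00.0/debug_level
 *
 * The store handler parses the value with kstrtoul(buf, 0, ...), so both hex
 * and decimal forms are accepted.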
3099 */ 3100 static ssize_t 3101 il3945_show_debug_level(struct device *d, struct device_attribute *attr, 3102 char *buf) 3103 { 3104 struct il_priv *il = dev_get_drvdata(d); 3105 return sprintf(buf, "0x%08X\n", il_get_debug_level(il)); 3106 } 3107 3108 static ssize_t 3109 il3945_store_debug_level(struct device *d, struct device_attribute *attr, 3110 const char *buf, size_t count) 3111 { 3112 struct il_priv *il = dev_get_drvdata(d); 3113 unsigned long val; 3114 int ret; 3115 3116 ret = kstrtoul(buf, 0, &val); 3117 if (ret) 3118 IL_INFO("%s is not in hex or decimal form.\n", buf); 3119 else 3120 il->debug_level = val; 3121 3122 return strnlen(buf, count); 3123 } 3124 3125 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level, 3126 il3945_store_debug_level); 3127 3128 #endif /* CONFIG_IWLEGACY_DEBUG */ 3129 3130 static ssize_t 3131 il3945_show_temperature(struct device *d, struct device_attribute *attr, 3132 char *buf) 3133 { 3134 struct il_priv *il = dev_get_drvdata(d); 3135 3136 if (!il_is_alive(il)) 3137 return -EAGAIN; 3138 3139 return sprintf(buf, "%d\n", il3945_hw_get_temperature(il)); 3140 } 3141 3142 static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL); 3143 3144 static ssize_t 3145 il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) 3146 { 3147 struct il_priv *il = dev_get_drvdata(d); 3148 return sprintf(buf, "%d\n", il->tx_power_user_lmt); 3149 } 3150 3151 static ssize_t 3152 il3945_store_tx_power(struct device *d, struct device_attribute *attr, 3153 const char *buf, size_t count) 3154 { 3155 struct il_priv *il = dev_get_drvdata(d); 3156 char *p = (char *)buf; 3157 u32 val; 3158 3159 val = simple_strtoul(p, &p, 10); 3160 if (p == buf) 3161 IL_INFO(": %s is not in decimal form.\n", buf); 3162 else 3163 il3945_hw_reg_set_txpower(il, val); 3164 3165 return count; 3166 } 3167 3168 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power, 3169 il3945_store_tx_power); 3170 3171 static ssize_t 3172 il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf) 3173 { 3174 struct il_priv *il = dev_get_drvdata(d); 3175 3176 return sprintf(buf, "0x%04X\n", il->active.flags); 3177 } 3178 3179 static ssize_t 3180 il3945_store_flags(struct device *d, struct device_attribute *attr, 3181 const char *buf, size_t count) 3182 { 3183 struct il_priv *il = dev_get_drvdata(d); 3184 u32 flags = simple_strtoul(buf, NULL, 0); 3185 3186 mutex_lock(&il->mutex); 3187 if (le32_to_cpu(il->staging.flags) != flags) { 3188 /* Cancel any currently running scans... 
*/ 3189 if (il_scan_cancel_timeout(il, 100)) 3190 IL_WARN("Could not cancel scan.\n"); 3191 else { 3192 D_INFO("Committing rxon.flags = 0x%04X\n", flags); 3193 il->staging.flags = cpu_to_le32(flags); 3194 il3945_commit_rxon(il); 3195 } 3196 } 3197 mutex_unlock(&il->mutex); 3198 3199 return count; 3200 } 3201 3202 static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags, 3203 il3945_store_flags); 3204 3205 static ssize_t 3206 il3945_show_filter_flags(struct device *d, struct device_attribute *attr, 3207 char *buf) 3208 { 3209 struct il_priv *il = dev_get_drvdata(d); 3210 3211 return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags)); 3212 } 3213 3214 static ssize_t 3215 il3945_store_filter_flags(struct device *d, struct device_attribute *attr, 3216 const char *buf, size_t count) 3217 { 3218 struct il_priv *il = dev_get_drvdata(d); 3219 u32 filter_flags = simple_strtoul(buf, NULL, 0); 3220 3221 mutex_lock(&il->mutex); 3222 if (le32_to_cpu(il->staging.filter_flags) != filter_flags) { 3223 /* Cancel any currently running scans... */ 3224 if (il_scan_cancel_timeout(il, 100)) 3225 IL_WARN("Could not cancel scan.\n"); 3226 else { 3227 D_INFO("Committing rxon.filter_flags = " "0x%04X\n", 3228 filter_flags); 3229 il->staging.filter_flags = cpu_to_le32(filter_flags); 3230 il3945_commit_rxon(il); 3231 } 3232 } 3233 mutex_unlock(&il->mutex); 3234 3235 return count; 3236 } 3237 3238 static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags, 3239 il3945_store_filter_flags); 3240 3241 static ssize_t 3242 il3945_show_measurement(struct device *d, struct device_attribute *attr, 3243 char *buf) 3244 { 3245 struct il_priv *il = dev_get_drvdata(d); 3246 struct il_spectrum_notification measure_report; 3247 u32 size = sizeof(measure_report), len = 0, ofs = 0; 3248 u8 *data = (u8 *) &measure_report; 3249 unsigned long flags; 3250 3251 spin_lock_irqsave(&il->lock, flags); 3252 if (!(il->measurement_status & MEASUREMENT_READY)) { 3253 spin_unlock_irqrestore(&il->lock, flags); 3254 return 0; 3255 } 3256 memcpy(&measure_report, &il->measure_report, size); 3257 il->measurement_status = 0; 3258 spin_unlock_irqrestore(&il->lock, flags); 3259 3260 while (size && PAGE_SIZE - len) { 3261 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, 3262 PAGE_SIZE - len, true); 3263 len = strlen(buf); 3264 if (PAGE_SIZE - len) 3265 buf[len++] = '\n'; 3266 3267 ofs += 16; 3268 size -= min(size, 16U); 3269 } 3270 3271 return len; 3272 } 3273 3274 static ssize_t 3275 il3945_store_measurement(struct device *d, struct device_attribute *attr, 3276 const char *buf, size_t count) 3277 { 3278 struct il_priv *il = dev_get_drvdata(d); 3279 struct ieee80211_measurement_params params = { 3280 .channel = le16_to_cpu(il->active.channel), 3281 .start_time = cpu_to_le64(il->_3945.last_tsf), 3282 .duration = cpu_to_le16(1), 3283 }; 3284 u8 type = IL_MEASURE_BASIC; 3285 u8 buffer[32]; 3286 u8 channel; 3287 3288 if (count) { 3289 char *p = buffer; 3290 strlcpy(buffer, buf, sizeof(buffer)); 3291 channel = simple_strtoul(p, NULL, 0); 3292 if (channel) 3293 params.channel = channel; 3294 3295 p = buffer; 3296 while (*p && *p != ' ') 3297 p++; 3298 if (*p) 3299 type = simple_strtoul(p + 1, NULL, 0); 3300 } 3301 3302 D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n", 3303 type, params.channel, buf); 3304 il3945_get_measurement(il, ¶ms, type); 3305 3306 return count; 3307 } 3308 3309 static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement, 3310 il3945_store_measurement); 3311 
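/* retry_rate: per-device retry setting; il3945_store_retry_rate() below
 * clamps written values of 0 or less up to 1. */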
3312 static ssize_t 3313 il3945_store_retry_rate(struct device *d, struct device_attribute *attr, 3314 const char *buf, size_t count) 3315 { 3316 struct il_priv *il = dev_get_drvdata(d); 3317 3318 il->retry_rate = simple_strtoul(buf, NULL, 0); 3319 if (il->retry_rate <= 0) 3320 il->retry_rate = 1; 3321 3322 return count; 3323 } 3324 3325 static ssize_t 3326 il3945_show_retry_rate(struct device *d, struct device_attribute *attr, 3327 char *buf) 3328 { 3329 struct il_priv *il = dev_get_drvdata(d); 3330 return sprintf(buf, "%d", il->retry_rate); 3331 } 3332 3333 static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate, 3334 il3945_store_retry_rate); 3335 3336 static ssize_t 3337 il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf) 3338 { 3339 /* all this shit doesn't belong into sysfs anyway */ 3340 return 0; 3341 } 3342 3343 static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL); 3344 3345 static ssize_t 3346 il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf) 3347 { 3348 struct il_priv *il = dev_get_drvdata(d); 3349 3350 if (!il_is_alive(il)) 3351 return -EAGAIN; 3352 3353 return sprintf(buf, "%d\n", il3945_mod_params.antenna); 3354 } 3355 3356 static ssize_t 3357 il3945_store_antenna(struct device *d, struct device_attribute *attr, 3358 const char *buf, size_t count) 3359 { 3360 struct il_priv *il __maybe_unused = dev_get_drvdata(d); 3361 int ant; 3362 3363 if (count == 0) 3364 return 0; 3365 3366 if (sscanf(buf, "%1i", &ant) != 1) { 3367 D_INFO("not in hex or decimal form.\n"); 3368 return count; 3369 } 3370 3371 if (ant >= 0 && ant <= 2) { 3372 D_INFO("Setting antenna select to %d.\n", ant); 3373 il3945_mod_params.antenna = (enum il3945_antenna)ant; 3374 } else 3375 D_INFO("Bad antenna select value %d.\n", ant); 3376 3377 return count; 3378 } 3379 3380 static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna, 3381 il3945_store_antenna); 3382 3383 static ssize_t 3384 il3945_show_status(struct device *d, struct device_attribute *attr, char *buf) 3385 { 3386 struct il_priv *il = dev_get_drvdata(d); 3387 if (!il_is_alive(il)) 3388 return -EAGAIN; 3389 return sprintf(buf, "0x%08x\n", (int)il->status); 3390 } 3391 3392 static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL); 3393 3394 static ssize_t 3395 il3945_dump_error_log(struct device *d, struct device_attribute *attr, 3396 const char *buf, size_t count) 3397 { 3398 struct il_priv *il = dev_get_drvdata(d); 3399 char *p = (char *)buf; 3400 3401 if (p[0] == '1') 3402 il3945_dump_nic_error_log(il); 3403 3404 return strnlen(buf, count); 3405 } 3406 3407 static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log); 3408 3409 /***************************************************************************** 3410 * 3411 * driver setup and tear down 3412 * 3413 *****************************************************************************/ 3414 3415 static void 3416 il3945_setup_deferred_work(struct il_priv *il) 3417 { 3418 il->workqueue = create_singlethread_workqueue(DRV_NAME); 3419 3420 init_waitqueue_head(&il->wait_command_queue); 3421 3422 INIT_WORK(&il->restart, il3945_bg_restart); 3423 INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish); 3424 INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start); 3425 INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start); 3426 INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll); 3427 3428 il_setup_scan_deferred_work(il); 3429 3430 il3945_hw_setup_deferred_work(il); 3431 3432 
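	/* Last, the stuck-TX-queue watchdog timer and the tasklet that runs
	 * the interrupt bottom half. */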
setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il); 3433 3434 tasklet_init(&il->irq_tasklet, 3435 (void (*)(unsigned long))il3945_irq_tasklet, 3436 (unsigned long)il); 3437 } 3438 3439 static void 3440 il3945_cancel_deferred_work(struct il_priv *il) 3441 { 3442 il3945_hw_cancel_deferred_work(il); 3443 3444 cancel_delayed_work_sync(&il->init_alive_start); 3445 cancel_delayed_work(&il->alive_start); 3446 3447 il_cancel_scan_deferred_work(il); 3448 } 3449 3450 static struct attribute *il3945_sysfs_entries[] = { 3451 &dev_attr_antenna.attr, 3452 &dev_attr_channels.attr, 3453 &dev_attr_dump_errors.attr, 3454 &dev_attr_flags.attr, 3455 &dev_attr_filter_flags.attr, 3456 &dev_attr_measurement.attr, 3457 &dev_attr_retry_rate.attr, 3458 &dev_attr_status.attr, 3459 &dev_attr_temperature.attr, 3460 &dev_attr_tx_power.attr, 3461 #ifdef CONFIG_IWLEGACY_DEBUG 3462 &dev_attr_debug_level.attr, 3463 #endif 3464 NULL 3465 }; 3466 3467 static struct attribute_group il3945_attribute_group = { 3468 .name = NULL, /* put in device directory */ 3469 .attrs = il3945_sysfs_entries, 3470 }; 3471 3472 static struct ieee80211_ops il3945_mac_ops __ro_after_init = { 3473 .tx = il3945_mac_tx, 3474 .start = il3945_mac_start, 3475 .stop = il3945_mac_stop, 3476 .add_interface = il_mac_add_interface, 3477 .remove_interface = il_mac_remove_interface, 3478 .change_interface = il_mac_change_interface, 3479 .config = il_mac_config, 3480 .configure_filter = il3945_configure_filter, 3481 .set_key = il3945_mac_set_key, 3482 .conf_tx = il_mac_conf_tx, 3483 .reset_tsf = il_mac_reset_tsf, 3484 .bss_info_changed = il_mac_bss_info_changed, 3485 .hw_scan = il_mac_hw_scan, 3486 .sta_add = il3945_mac_sta_add, 3487 .sta_remove = il_mac_sta_remove, 3488 .tx_last_beacon = il_mac_tx_last_beacon, 3489 .flush = il_mac_flush, 3490 }; 3491 3492 static int 3493 il3945_init_drv(struct il_priv *il) 3494 { 3495 int ret; 3496 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; 3497 3498 il->retry_rate = 1; 3499 il->beacon_skb = NULL; 3500 3501 spin_lock_init(&il->sta_lock); 3502 spin_lock_init(&il->hcmd_lock); 3503 3504 INIT_LIST_HEAD(&il->free_frames); 3505 3506 mutex_init(&il->mutex); 3507 3508 il->ieee_channels = NULL; 3509 il->ieee_rates = NULL; 3510 il->band = NL80211_BAND_2GHZ; 3511 3512 il->iw_mode = NL80211_IFTYPE_STATION; 3513 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; 3514 3515 /* initialize force reset */ 3516 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD; 3517 3518 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3519 IL_WARN("Unsupported EEPROM version: 0x%04X\n", 3520 eeprom->version); 3521 ret = -EINVAL; 3522 goto err; 3523 } 3524 ret = il_init_channel_map(il); 3525 if (ret) { 3526 IL_ERR("initializing regulatory failed: %d\n", ret); 3527 goto err; 3528 } 3529 3530 /* Set up txpower settings in driver for all channels */ 3531 if (il3945_txpower_set_from_eeprom(il)) { 3532 ret = -EIO; 3533 goto err_free_channel_map; 3534 } 3535 3536 ret = il_init_geos(il); 3537 if (ret) { 3538 IL_ERR("initializing geos failed: %d\n", ret); 3539 goto err_free_channel_map; 3540 } 3541 il3945_init_hw_rates(il, il->ieee_rates); 3542 3543 return 0; 3544 3545 err_free_channel_map: 3546 il_free_channel_map(il); 3547 err: 3548 return ret; 3549 } 3550 3551 #define IL3945_MAX_PROBE_REQUEST 200 3552 3553 static int 3554 il3945_setup_mac(struct il_priv *il) 3555 { 3556 int ret; 3557 struct ieee80211_hw *hw = il->hw; 3558 3559 hw->rate_control_algorithm = "iwl-3945-rs"; 3560 hw->sta_data_size = 
sizeof(struct il3945_sta_priv); 3561 hw->vif_data_size = sizeof(struct il_vif_priv); 3562 3563 /* Tell mac80211 our characteristics */ 3564 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 3565 ieee80211_hw_set(hw, SUPPORTS_PS); 3566 ieee80211_hw_set(hw, SIGNAL_DBM); 3567 ieee80211_hw_set(hw, SPECTRUM_MGMT); 3568 3569 hw->wiphy->interface_modes = 3570 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); 3571 3572 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 3573 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 3574 REGULATORY_DISABLE_BEACON_HINTS; 3575 3576 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 3577 3578 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3579 /* we create the 802.11 header and a zero-length SSID element */ 3580 hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2; 3581 3582 /* Default value; 4 EDCA QOS priorities */ 3583 hw->queues = 4; 3584 3585 if (il->bands[NL80211_BAND_2GHZ].n_channels) 3586 il->hw->wiphy->bands[NL80211_BAND_2GHZ] = 3587 &il->bands[NL80211_BAND_2GHZ]; 3588 3589 if (il->bands[NL80211_BAND_5GHZ].n_channels) 3590 il->hw->wiphy->bands[NL80211_BAND_5GHZ] = 3591 &il->bands[NL80211_BAND_5GHZ]; 3592 3593 il_leds_init(il); 3594 3595 ret = ieee80211_register_hw(il->hw); 3596 if (ret) { 3597 IL_ERR("Failed to register hw (error %d)\n", ret); 3598 return ret; 3599 } 3600 il->mac80211_registered = 1; 3601 3602 return 0; 3603 } 3604 3605 static int 3606 il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3607 { 3608 int err = 0; 3609 struct il_priv *il; 3610 struct ieee80211_hw *hw; 3611 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); 3612 struct il3945_eeprom *eeprom; 3613 unsigned long flags; 3614 3615 /*********************** 3616 * 1. Allocating HW data 3617 * ********************/ 3618 3619 hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il3945_mac_ops); 3620 if (!hw) { 3621 err = -ENOMEM; 3622 goto out; 3623 } 3624 il = hw->priv; 3625 il->hw = hw; 3626 SET_IEEE80211_DEV(hw, &pdev->dev); 3627 3628 il->cmd_queue = IL39_CMD_QUEUE_NUM; 3629 3630 D_INFO("*** LOAD DRIVER ***\n"); 3631 il->cfg = cfg; 3632 il->ops = &il3945_ops; 3633 #ifdef CONFIG_IWLEGACY_DEBUGFS 3634 il->debugfs_ops = &il3945_debugfs_ops; 3635 #endif 3636 il->pci_dev = pdev; 3637 il->inta_mask = CSR_INI_SET_MASK; 3638 3639 /*************************** 3640 * 2. Initializing PCI bus 3641 * *************************/ 3642 pci_disable_link_state(pdev, 3643 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | 3644 PCIE_LINK_STATE_CLKPM); 3645 3646 if (pci_enable_device(pdev)) { 3647 err = -ENODEV; 3648 goto out_ieee80211_free_hw; 3649 } 3650 3651 pci_set_master(pdev); 3652 3653 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3654 if (!err) 3655 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3656 if (err) { 3657 IL_WARN("No suitable DMA available.\n"); 3658 goto out_pci_disable_device; 3659 } 3660 3661 pci_set_drvdata(pdev, il); 3662 err = pci_request_regions(pdev, DRV_NAME); 3663 if (err) 3664 goto out_pci_disable_device; 3665 3666 /*********************** 3667 * 3. 
Read REV Register 3668 * ********************/ 3669 il->hw_base = pci_ioremap_bar(pdev, 0); 3670 if (!il->hw_base) { 3671 err = -ENODEV; 3672 goto out_pci_release_regions; 3673 } 3674 3675 D_INFO("pci_resource_len = 0x%08llx\n", 3676 (unsigned long long)pci_resource_len(pdev, 0)); 3677 D_INFO("pci_resource_base = %p\n", il->hw_base); 3678 3679 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3680 * PCI Tx retries from interfering with C3 CPU state */ 3681 pci_write_config_byte(pdev, 0x41, 0x00); 3682 3683 /* these spin locks will be used in apm_init and EEPROM access 3684 * we should init now 3685 */ 3686 spin_lock_init(&il->reg_lock); 3687 spin_lock_init(&il->lock); 3688 3689 /* 3690 * stop and reset the on-board processor just in case it is in a 3691 * strange state ... like being left stranded by a primary kernel 3692 * and this is now the kdump kernel trying to start up 3693 */ 3694 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3695 3696 /*********************** 3697 * 4. Read EEPROM 3698 * ********************/ 3699 3700 /* Read the EEPROM */ 3701 err = il_eeprom_init(il); 3702 if (err) { 3703 IL_ERR("Unable to init EEPROM\n"); 3704 goto out_iounmap; 3705 } 3706 /* MAC Address location in EEPROM same for 3945/4965 */ 3707 eeprom = (struct il3945_eeprom *)il->eeprom; 3708 D_INFO("MAC address: %pM\n", eeprom->mac_address); 3709 SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address); 3710 3711 /*********************** 3712 * 5. Setup HW Constants 3713 * ********************/ 3714 /* Device-specific setup */ 3715 err = il3945_hw_set_hw_params(il); 3716 if (err) { 3717 IL_ERR("failed to set hw settings\n"); 3718 goto out_eeprom_free; 3719 } 3720 3721 /*********************** 3722 * 6. Setup il 3723 * ********************/ 3724 3725 err = il3945_init_drv(il); 3726 if (err) { 3727 IL_ERR("initializing driver failed\n"); 3728 goto out_unset_hw_params; 3729 } 3730 3731 IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name); 3732 3733 /*********************** 3734 * 7. Setup Services 3735 * ********************/ 3736 3737 spin_lock_irqsave(&il->lock, flags); 3738 il_disable_interrupts(il); 3739 spin_unlock_irqrestore(&il->lock, flags); 3740 3741 pci_enable_msi(il->pci_dev); 3742 3743 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); 3744 if (err) { 3745 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); 3746 goto out_disable_msi; 3747 } 3748 3749 err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group); 3750 if (err) { 3751 IL_ERR("failed to create sysfs device attributes\n"); 3752 goto out_release_irq; 3753 } 3754 3755 il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]); 3756 il3945_setup_deferred_work(il); 3757 il3945_setup_handlers(il); 3758 il_power_initialize(il); 3759 3760 /********************************* 3761 * 8. Setup and Register mac80211 3762 * *******************************/ 3763 3764 il_enable_interrupts(il); 3765 3766 err = il3945_setup_mac(il); 3767 if (err) 3768 goto out_remove_sysfs; 3769 3770 err = il_dbgfs_register(il, DRV_NAME); 3771 if (err) 3772 IL_ERR("failed to create debugfs files. 
Ignoring error: %d\n", 3773 err); 3774 3775 /* Start monitoring the killswitch */ 3776 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ); 3777 3778 return 0; 3779 3780 out_remove_sysfs: 3781 destroy_workqueue(il->workqueue); 3782 il->workqueue = NULL; 3783 sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); 3784 out_release_irq: 3785 free_irq(il->pci_dev->irq, il); 3786 out_disable_msi: 3787 pci_disable_msi(il->pci_dev); 3788 il_free_geos(il); 3789 il_free_channel_map(il); 3790 out_unset_hw_params: 3791 il3945_unset_hw_params(il); 3792 out_eeprom_free: 3793 il_eeprom_free(il); 3794 out_iounmap: 3795 iounmap(il->hw_base); 3796 out_pci_release_regions: 3797 pci_release_regions(pdev); 3798 out_pci_disable_device: 3799 pci_disable_device(pdev); 3800 out_ieee80211_free_hw: 3801 ieee80211_free_hw(il->hw); 3802 out: 3803 return err; 3804 } 3805 3806 static void 3807 il3945_pci_remove(struct pci_dev *pdev) 3808 { 3809 struct il_priv *il = pci_get_drvdata(pdev); 3810 unsigned long flags; 3811 3812 if (!il) 3813 return; 3814 3815 D_INFO("*** UNLOAD DRIVER ***\n"); 3816 3817 il_dbgfs_unregister(il); 3818 3819 set_bit(S_EXIT_PENDING, &il->status); 3820 3821 il_leds_exit(il); 3822 3823 if (il->mac80211_registered) { 3824 ieee80211_unregister_hw(il->hw); 3825 il->mac80211_registered = 0; 3826 } else { 3827 il3945_down(il); 3828 } 3829 3830 /* 3831 * Make sure device is reset to low power before unloading driver. 3832 * This may be redundant with il_down(), but there are paths to 3833 * run il_down() without calling apm_ops.stop(), and there are 3834 * paths to avoid running il_down() at all before leaving driver. 3835 * This (inexpensive) call *makes sure* device is reset. 3836 */ 3837 il_apm_stop(il); 3838 3839 /* make sure we flush any pending irq or 3840 * tasklet for the driver 3841 */ 3842 spin_lock_irqsave(&il->lock, flags); 3843 il_disable_interrupts(il); 3844 spin_unlock_irqrestore(&il->lock, flags); 3845 3846 il3945_synchronize_irq(il); 3847 3848 sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); 3849 3850 cancel_delayed_work_sync(&il->_3945.rfkill_poll); 3851 3852 il3945_dealloc_ucode_pci(il); 3853 3854 if (il->rxq.bd) 3855 il3945_rx_queue_free(il, &il->rxq); 3856 il3945_hw_txq_ctx_free(il); 3857 3858 il3945_unset_hw_params(il); 3859 3860 /*netif_stop_queue(dev); */ 3861 flush_workqueue(il->workqueue); 3862 3863 /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes 3864 * il->workqueue... so we can't take down the workqueue 3865 * until now... 
*/ 3866 destroy_workqueue(il->workqueue); 3867 il->workqueue = NULL; 3868 3869 free_irq(pdev->irq, il); 3870 pci_disable_msi(pdev); 3871 3872 iounmap(il->hw_base); 3873 pci_release_regions(pdev); 3874 pci_disable_device(pdev); 3875 3876 il_free_channel_map(il); 3877 il_free_geos(il); 3878 kfree(il->scan_cmd); 3879 if (il->beacon_skb) 3880 dev_kfree_skb(il->beacon_skb); 3881 3882 ieee80211_free_hw(il->hw); 3883 } 3884 3885 /***************************************************************************** 3886 * 3887 * driver and module entry point 3888 * 3889 *****************************************************************************/ 3890 3891 static struct pci_driver il3945_driver = { 3892 .name = DRV_NAME, 3893 .id_table = il3945_hw_card_ids, 3894 .probe = il3945_pci_probe, 3895 .remove = il3945_pci_remove, 3896 .driver.pm = IL_LEGACY_PM_OPS, 3897 }; 3898 3899 static int __init 3900 il3945_init(void) 3901 { 3902 3903 int ret; 3904 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); 3905 pr_info(DRV_COPYRIGHT "\n"); 3906 3907 /* 3908 * Disabling hardware scan means that mac80211 will perform scans 3909 * "the hard way", rather than using device's scan. 3910 */ 3911 if (il3945_mod_params.disable_hw_scan) { 3912 pr_info("hw_scan is disabled\n"); 3913 il3945_mac_ops.hw_scan = NULL; 3914 } 3915 3916 ret = il3945_rate_control_register(); 3917 if (ret) { 3918 pr_err("Unable to register rate control algorithm: %d\n", ret); 3919 return ret; 3920 } 3921 3922 ret = pci_register_driver(&il3945_driver); 3923 if (ret) { 3924 pr_err("Unable to initialize PCI module\n"); 3925 goto error_register; 3926 } 3927 3928 return ret; 3929 3930 error_register: 3931 il3945_rate_control_unregister(); 3932 return ret; 3933 } 3934 3935 static void __exit 3936 il3945_exit(void) 3937 { 3938 pci_unregister_driver(&il3945_driver); 3939 il3945_rate_control_unregister(); 3940 } 3941 3942 MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX)); 3943 3944 module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO); 3945 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 3946 module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO); 3947 MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])"); 3948 module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int, 3949 S_IRUGO); 3950 MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); 3951 #ifdef CONFIG_IWLEGACY_DEBUG 3952 module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR); 3953 MODULE_PARM_DESC(debug, "debug output mask"); 3954 #endif 3955 module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO); 3956 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 3957 3958 module_exit(il3945_exit); 3959 module_init(il3945_init); 3960
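
/*
 * Hypothetical load example; the parameter values are only illustrative:
 *
 *   modprobe iwl3945 swcrypto=0 disable_hw_scan=0 fw_restart=1
 */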