1 // SPDX-License-Identifier: GPL-2.0-only 2 /****************************************************************************** 3 * 4 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. 5 * 6 * Portions of this file are derived from the ipw3945 project, as well 7 * as portions of the ieee80211 subsystem header files. 8 * 9 * Contact Information: 10 * Intel Linux Wireless <ilw@linux.intel.com> 11 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 12 * 13 *****************************************************************************/ 14 15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 17 #include <linux/kernel.h> 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/pci.h> 21 #include <linux/slab.h> 22 #include <linux/dma-mapping.h> 23 #include <linux/delay.h> 24 #include <linux/sched.h> 25 #include <linux/skbuff.h> 26 #include <linux/netdevice.h> 27 #include <linux/firmware.h> 28 #include <linux/etherdevice.h> 29 #include <linux/if_arp.h> 30 31 #include <net/ieee80211_radiotap.h> 32 #include <net/mac80211.h> 33 34 #include <asm/div64.h> 35 36 #define DRV_NAME "iwl3945" 37 38 #include "commands.h" 39 #include "common.h" 40 #include "3945.h" 41 #include "iwl-spectrum.h" 42 43 /* 44 * module name, copyright, version, etc. 45 */ 46 47 #define DRV_DESCRIPTION \ 48 "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" 49 50 #ifdef CONFIG_IWLEGACY_DEBUG 51 #define VD "d" 52 #else 53 #define VD 54 #endif 55 56 /* 57 * add "s" to indicate spectrum measurement included. 58 * we add it here to be consistent with previous releases in which 59 * this was configurable. 60 */ 61 #define DRV_VERSION IWLWIFI_VERSION VD "s" 62 #define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" 63 #define DRV_AUTHOR "<ilw@linux.intel.com>" 64 65 MODULE_DESCRIPTION(DRV_DESCRIPTION); 66 MODULE_VERSION(DRV_VERSION); 67 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 68 MODULE_LICENSE("GPL"); 69 70 /* module parameters */ 71 struct il_mod_params il3945_mod_params = { 72 .sw_crypto = 1, 73 .restart_fw = 1, 74 .disable_hw_scan = 1, 75 /* the rest are 0 by default */ 76 }; 77 78 /** 79 * il3945_get_antenna_flags - Get antenna flags for RXON command 80 * @il: eeprom and antenna fields are used to determine antenna flags 81 * 82 * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed 83 * il3945_mod_params.antenna specifies the antenna diversity mode: 84 * 85 * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself 86 * IL_ANTENNA_MAIN - Force MAIN antenna 87 * IL_ANTENNA_AUX - Force AUX antenna 88 */ 89 __le32 90 il3945_get_antenna_flags(const struct il_priv *il) 91 { 92 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; 93 94 switch (il3945_mod_params.antenna) { 95 case IL_ANTENNA_DIVERSITY: 96 return 0; 97 98 case IL_ANTENNA_MAIN: 99 if (eeprom->antenna_switch_type) 100 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; 101 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; 102 103 case IL_ANTENNA_AUX: 104 if (eeprom->antenna_switch_type) 105 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; 106 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; 107 } 108 109 /* bad antenna selector value */ 110 IL_ERR("Bad antenna selector value (0x%x)\n", 111 il3945_mod_params.antenna); 112 113 return 0; /* "diversity" is default if error */ 114 } 115 116 static int 117 il3945_set_ccmp_dynamic_key_info(struct il_priv *il, 118 struct ieee80211_key_conf *keyconf, u8 sta_id) 119 { 120 unsigned long flags; 121 __le16 key_flags = 0; 122 
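	/* What follows: build the uCode key flags (cipher type, key index,
	 * multicast bit for the broadcast station), copy the key material
	 * into il->stations[] under il->sta_lock, then push it to the
	 * firmware with an asynchronous C_ADD_STA command. */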
	int ret;

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = keyconf->keyidx;
	key_flags &= ~STA_KEY_FLG_INVALID;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	D_INFO("hwcrypto: modify ucode station key info\n");

	ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}

static int
il3945_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	return -EOPNOTSUPP;
}

static int
il3945_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	return -EOPNOTSUPP;
}

static int
il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
{
	unsigned long flags;
	struct il_addsta_cmd sta_cmd;

	spin_lock_irqsave(&il->sta_lock, flags);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	D_INFO("hwcrypto: clear ucode station key info\n");
	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}

static int
il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       u8 sta_id)
{
	int ret = 0;

	keyconf->hw_key_idx = HW_KEY_DYNAMIC;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
		break;
	default:
		IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
		ret = -EINVAL;
	}

	D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);

	return ret;
}

static int
il3945_remove_static_key(struct il_priv *il)
{
	return -EOPNOTSUPP;
232 } 233 234 static int 235 il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key) 236 { 237 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || 238 key->cipher == WLAN_CIPHER_SUITE_WEP104) 239 return -EOPNOTSUPP; 240 241 IL_ERR("Static key invalid: cipher %x\n", key->cipher); 242 return -EINVAL; 243 } 244 245 static void 246 il3945_clear_free_frames(struct il_priv *il) 247 { 248 struct list_head *element; 249 250 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); 251 252 while (!list_empty(&il->free_frames)) { 253 element = il->free_frames.next; 254 list_del(element); 255 kfree(list_entry(element, struct il3945_frame, list)); 256 il->frames_count--; 257 } 258 259 if (il->frames_count) { 260 IL_WARN("%d frames still in use. Did we lose one?\n", 261 il->frames_count); 262 il->frames_count = 0; 263 } 264 } 265 266 static struct il3945_frame * 267 il3945_get_free_frame(struct il_priv *il) 268 { 269 struct il3945_frame *frame; 270 struct list_head *element; 271 if (list_empty(&il->free_frames)) { 272 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 273 if (!frame) { 274 IL_ERR("Could not allocate frame!\n"); 275 return NULL; 276 } 277 278 il->frames_count++; 279 return frame; 280 } 281 282 element = il->free_frames.next; 283 list_del(element); 284 return list_entry(element, struct il3945_frame, list); 285 } 286 287 static void 288 il3945_free_frame(struct il_priv *il, struct il3945_frame *frame) 289 { 290 memset(frame, 0, sizeof(*frame)); 291 list_add(&frame->list, &il->free_frames); 292 } 293 294 unsigned int 295 il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr, 296 int left) 297 { 298 299 if (!il_is_associated(il) || !il->beacon_skb) 300 return 0; 301 302 if (il->beacon_skb->len > left) 303 return 0; 304 305 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len); 306 307 return il->beacon_skb->len; 308 } 309 310 static int 311 il3945_send_beacon_cmd(struct il_priv *il) 312 { 313 struct il3945_frame *frame; 314 unsigned int frame_size; 315 int rc; 316 u8 rate; 317 318 frame = il3945_get_free_frame(il); 319 320 if (!frame) { 321 IL_ERR("Could not obtain free frame buffer for beacon " 322 "command.\n"); 323 return -ENOMEM; 324 } 325 326 rate = il_get_lowest_plcp(il); 327 328 frame_size = il3945_hw_get_beacon_cmd(il, frame, rate); 329 330 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]); 331 332 il3945_free_frame(il, frame); 333 334 return rc; 335 } 336 337 static void 338 il3945_unset_hw_params(struct il_priv *il) 339 { 340 if (il->_3945.shared_virt) 341 dma_free_coherent(&il->pci_dev->dev, 342 sizeof(struct il3945_shared), 343 il->_3945.shared_virt, il->_3945.shared_phys); 344 } 345 346 static void 347 il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info, 348 struct il_device_cmd *cmd, 349 struct sk_buff *skb_frag, int sta_id) 350 { 351 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; 352 struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo; 353 354 tx_cmd->sec_ctl = 0; 355 356 switch (keyinfo->cipher) { 357 case WLAN_CIPHER_SUITE_CCMP: 358 tx_cmd->sec_ctl = TX_CMD_SEC_CCM; 359 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); 360 D_TX("tx_cmd with AES hwcrypto\n"); 361 break; 362 363 case WLAN_CIPHER_SUITE_TKIP: 364 break; 365 366 case WLAN_CIPHER_SUITE_WEP104: 367 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; 368 fallthrough; 369 case WLAN_CIPHER_SUITE_WEP40: 370 tx_cmd->sec_ctl |= 371 TX_CMD_SEC_WEP | (info->control.hw_key-> 372 hw_key_idx & TX_CMD_SEC_MSK) << 373 
TX_CMD_SEC_SHIFT; 374 375 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); 376 377 D_TX("Configuring packet for WEP encryption " "with key %d\n", 378 info->control.hw_key->hw_key_idx); 379 break; 380 381 default: 382 IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher); 383 break; 384 } 385 } 386 387 /* 388 * handle build C_TX command notification. 389 */ 390 static void 391 il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd, 392 struct ieee80211_tx_info *info, 393 struct ieee80211_hdr *hdr, u8 std_id) 394 { 395 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; 396 __le32 tx_flags = tx_cmd->tx_flags; 397 __le16 fc = hdr->frame_control; 398 399 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 400 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 401 tx_flags |= TX_CMD_FLG_ACK_MSK; 402 if (ieee80211_is_mgmt(fc)) 403 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 404 if (ieee80211_is_probe_resp(fc) && 405 !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) 406 tx_flags |= TX_CMD_FLG_TSF_MSK; 407 } else { 408 tx_flags &= (~TX_CMD_FLG_ACK_MSK); 409 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 410 } 411 412 tx_cmd->sta_id = std_id; 413 if (ieee80211_has_morefrags(fc)) 414 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 415 416 if (ieee80211_is_data_qos(fc)) { 417 u8 *qc = ieee80211_get_qos_ctl(hdr); 418 tx_cmd->tid_tspec = qc[0] & 0xf; 419 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 420 } else { 421 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 422 } 423 424 il_tx_cmd_protection(il, info, fc, &tx_flags); 425 426 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 427 if (ieee80211_is_mgmt(fc)) { 428 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 429 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); 430 else 431 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); 432 } else { 433 tx_cmd->timeout.pm_frame_timeout = 0; 434 } 435 436 tx_cmd->driver_txop = 0; 437 tx_cmd->tx_flags = tx_flags; 438 tx_cmd->next_frame_len = 0; 439 } 440 441 /* 442 * start C_TX command process 443 */ 444 static int 445 il3945_tx_skb(struct il_priv *il, 446 struct ieee80211_sta *sta, 447 struct sk_buff *skb) 448 { 449 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 450 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 451 struct il3945_tx_cmd *tx_cmd; 452 struct il_tx_queue *txq = NULL; 453 struct il_queue *q = NULL; 454 struct il_device_cmd *out_cmd; 455 struct il_cmd_meta *out_meta; 456 dma_addr_t phys_addr; 457 dma_addr_t txcmd_phys; 458 int txq_id = skb_get_queue_mapping(skb); 459 u16 len, idx, hdr_len; 460 u16 firstlen, secondlen; 461 u8 sta_id; 462 u8 tid = 0; 463 __le16 fc; 464 u8 wait_write_ptr = 0; 465 unsigned long flags; 466 467 spin_lock_irqsave(&il->lock, flags); 468 if (il_is_rfkill(il)) { 469 D_DROP("Dropping - RF KILL\n"); 470 goto drop_unlock; 471 } 472 473 if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) == 474 IL_INVALID_RATE) { 475 IL_ERR("ERROR: No TX rate available.\n"); 476 goto drop_unlock; 477 } 478 479 fc = hdr->frame_control; 480 481 #ifdef CONFIG_IWLEGACY_DEBUG 482 if (ieee80211_is_auth(fc)) 483 D_TX("Sending AUTH frame\n"); 484 else if (ieee80211_is_assoc_req(fc)) 485 D_TX("Sending ASSOC frame\n"); 486 else if (ieee80211_is_reassoc_req(fc)) 487 D_TX("Sending REASSOC frame\n"); 488 #endif 489 490 spin_unlock_irqrestore(&il->lock, flags); 491 492 hdr_len = ieee80211_hdrlen(fc); 493 494 /* Find idx into station table for destination station */ 495 sta_id = il_sta_id_or_broadcast(il, sta); 496 if (sta_id == IL_INVALID_STATION) { 497 D_DROP("Dropping - INVALID STATION: 
%pM\n", hdr->addr1); 498 goto drop; 499 } 500 501 D_RATE("station Id %d\n", sta_id); 502 503 if (ieee80211_is_data_qos(fc)) { 504 u8 *qc = ieee80211_get_qos_ctl(hdr); 505 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 506 if (unlikely(tid >= MAX_TID_COUNT)) 507 goto drop; 508 } 509 510 /* Descriptor for chosen Tx queue */ 511 txq = &il->txq[txq_id]; 512 q = &txq->q; 513 514 if ((il_queue_space(q) < q->high_mark)) 515 goto drop; 516 517 spin_lock_irqsave(&il->lock, flags); 518 519 idx = il_get_cmd_idx(q, q->write_ptr, 0); 520 521 txq->skbs[q->write_ptr] = skb; 522 523 /* Init first empty entry in queue's array of Tx/cmd buffers */ 524 out_cmd = txq->cmd[idx]; 525 out_meta = &txq->meta[idx]; 526 tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload; 527 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 528 memset(tx_cmd, 0, sizeof(*tx_cmd)); 529 530 /* 531 * Set up the Tx-command (not MAC!) header. 532 * Store the chosen Tx queue and TFD idx within the sequence field; 533 * after Tx, uCode's Tx response will return this value so driver can 534 * locate the frame within the tx queue and do post-tx processing. 535 */ 536 out_cmd->hdr.cmd = C_TX; 537 out_cmd->hdr.sequence = 538 cpu_to_le16((u16) 539 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr))); 540 541 /* Copy MAC header from skb into command buffer */ 542 memcpy(tx_cmd->hdr, hdr, hdr_len); 543 544 if (info->control.hw_key) 545 il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id); 546 547 /* TODO need this for burst mode later on */ 548 il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id); 549 550 il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id); 551 552 /* Total # bytes to be transmitted */ 553 tx_cmd->len = cpu_to_le16((u16) skb->len); 554 555 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 556 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 557 558 /* 559 * Use the first empty entry in this queue's command buffer array 560 * to contain the Tx command and MAC header concatenated together 561 * (payload data will be in another buffer). 562 * Size of this varies, due to varying MAC header length. 563 * If end is not dword aligned, we'll have 2 extra bytes at the end 564 * of the MAC header (device reads on dword boundaries). 565 * We'll tell device about this padding later. 566 */ 567 len = 568 sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + 569 hdr_len; 570 firstlen = (len + 3) & ~3; 571 572 /* Physical address of this Tx command's header (not MAC header!), 573 * within command buffer array. */ 574 txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen, 575 DMA_TO_DEVICE); 576 if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys))) 577 goto drop_unlock; 578 579 /* Set up TFD's 2nd entry to point directly to remainder of skb, 580 * if any (802.11 null frames have no payload). */ 581 secondlen = skb->len - hdr_len; 582 if (secondlen > 0) { 583 phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len, 584 secondlen, DMA_TO_DEVICE); 585 if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) 586 goto drop_unlock; 587 } 588 589 /* Add buffer containing Tx command and MAC(!) 
header to TFD's 590 * first entry */ 591 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); 592 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 593 dma_unmap_len_set(out_meta, len, firstlen); 594 if (secondlen > 0) 595 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0, 596 U32_PAD(secondlen)); 597 598 if (!ieee80211_has_morefrags(hdr->frame_control)) { 599 txq->need_update = 1; 600 } else { 601 wait_write_ptr = 1; 602 txq->need_update = 0; 603 } 604 605 il_update_stats(il, true, fc, skb->len); 606 607 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); 608 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); 609 il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd)); 610 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, 611 ieee80211_hdrlen(fc)); 612 613 /* Tell device the write idx *just past* this latest filled TFD */ 614 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); 615 il_txq_update_write_ptr(il, txq); 616 spin_unlock_irqrestore(&il->lock, flags); 617 618 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { 619 if (wait_write_ptr) { 620 spin_lock_irqsave(&il->lock, flags); 621 txq->need_update = 1; 622 il_txq_update_write_ptr(il, txq); 623 spin_unlock_irqrestore(&il->lock, flags); 624 } 625 626 il_stop_queue(il, txq); 627 } 628 629 return 0; 630 631 drop_unlock: 632 spin_unlock_irqrestore(&il->lock, flags); 633 drop: 634 return -1; 635 } 636 637 static int 638 il3945_get_measurement(struct il_priv *il, 639 struct ieee80211_measurement_params *params, u8 type) 640 { 641 struct il_spectrum_cmd spectrum; 642 struct il_rx_pkt *pkt; 643 struct il_host_cmd cmd = { 644 .id = C_SPECTRUM_MEASUREMENT, 645 .data = (void *)&spectrum, 646 .flags = CMD_WANT_SKB, 647 }; 648 u32 add_time = le64_to_cpu(params->start_time); 649 int rc; 650 int spectrum_resp_status; 651 int duration = le16_to_cpu(params->duration); 652 653 if (il_is_associated(il)) 654 add_time = 655 il_usecs_to_beacons(il, 656 le64_to_cpu(params->start_time) - 657 il->_3945.last_tsf, 658 le16_to_cpu(il->timing.beacon_interval)); 659 660 memset(&spectrum, 0, sizeof(spectrum)); 661 662 spectrum.channel_count = cpu_to_le16(1); 663 spectrum.flags = 664 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; 665 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; 666 cmd.len = sizeof(spectrum); 667 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 668 669 if (il_is_associated(il)) 670 spectrum.start_time = 671 il_add_beacon_time(il, il->_3945.last_beacon_time, add_time, 672 le16_to_cpu(il->timing.beacon_interval)); 673 else 674 spectrum.start_time = 0; 675 676 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); 677 spectrum.channels[0].channel = params->channel; 678 spectrum.channels[0].type = type; 679 if (il->active.flags & RXON_FLG_BAND_24G_MSK) 680 spectrum.flags |= 681 RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | 682 RXON_FLG_TGG_PROTECT_MSK; 683 684 rc = il_send_cmd_sync(il, &cmd); 685 if (rc) 686 return rc; 687 688 pkt = (struct il_rx_pkt *)cmd.reply_page; 689 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { 690 IL_ERR("Bad return from N_RX_ON_ASSOC command\n"); 691 rc = -EIO; 692 } 693 694 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); 695 switch (spectrum_resp_status) { 696 case 0: /* Command will be handled */ 697 if (pkt->u.spectrum.id != 0xff) { 698 D_INFO("Replaced existing measurement: %d\n", 699 pkt->u.spectrum.id); 700 il->measurement_status &= ~MEASUREMENT_READY; 701 } 702 il->measurement_status |= 
MEASUREMENT_ACTIVE; 703 rc = 0; 704 break; 705 706 case 1: /* Command will not be handled */ 707 rc = -EAGAIN; 708 break; 709 } 710 711 il_free_pages(il, cmd.reply_page); 712 713 return rc; 714 } 715 716 static void 717 il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) 718 { 719 struct il_rx_pkt *pkt = rxb_addr(rxb); 720 struct il_alive_resp *palive; 721 struct delayed_work *pwork; 722 723 palive = &pkt->u.alive_frame; 724 725 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", 726 palive->is_valid, palive->ver_type, palive->ver_subtype); 727 728 if (palive->ver_subtype == INITIALIZE_SUBTYPE) { 729 D_INFO("Initialization Alive received.\n"); 730 memcpy(&il->card_alive_init, &pkt->u.alive_frame, 731 sizeof(struct il_alive_resp)); 732 pwork = &il->init_alive_start; 733 } else { 734 D_INFO("Runtime Alive received.\n"); 735 memcpy(&il->card_alive, &pkt->u.alive_frame, 736 sizeof(struct il_alive_resp)); 737 pwork = &il->alive_start; 738 il3945_disable_events(il); 739 } 740 741 /* We delay the ALIVE response by 5ms to 742 * give the HW RF Kill time to activate... */ 743 if (palive->is_valid == UCODE_VALID_OK) 744 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5)); 745 else 746 IL_WARN("uCode did not respond OK.\n"); 747 } 748 749 static void 750 il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb) 751 { 752 struct il_rx_pkt *pkt = rxb_addr(rxb); 753 754 D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status); 755 } 756 757 static void 758 il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb) 759 { 760 struct il_rx_pkt *pkt = rxb_addr(rxb); 761 struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status); 762 #ifdef CONFIG_IWLEGACY_DEBUG 763 u8 rate = beacon->beacon_notify_hdr.rate; 764 765 D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n", 766 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, 767 beacon->beacon_notify_hdr.failure_frame, 768 le32_to_cpu(beacon->ibss_mgr_status), 769 le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); 770 #endif 771 772 il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 773 774 } 775 776 /* Handle notification from uCode that card's power state is changing 777 * due to software, hardware, or critical temperature RFKILL */ 778 static void 779 il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb) 780 { 781 struct il_rx_pkt *pkt = rxb_addr(rxb); 782 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 783 unsigned long status = il->status; 784 785 IL_WARN("Card state received: HW:%s SW:%s\n", 786 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 787 (flags & SW_CARD_DISABLED) ? "Kill" : "On"); 788 789 _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 790 791 if (flags & HW_CARD_DISABLED) 792 set_bit(S_RFKILL, &il->status); 793 else 794 clear_bit(S_RFKILL, &il->status); 795 796 il_scan_cancel(il); 797 798 if ((test_bit(S_RFKILL, &status) != 799 test_bit(S_RFKILL, &il->status))) 800 wiphy_rfkill_set_hw_state(il->hw->wiphy, 801 test_bit(S_RFKILL, &il->status)); 802 else 803 wake_up(&il->wait_command_queue); 804 } 805 806 /* 807 * il3945_setup_handlers - Initialize Rx handler callbacks 808 * 809 * Setup the RX handlers for each of the reply types sent from the uCode 810 * to the host. 811 * 812 * This function chains into the hardware specific files for them to setup 813 * any hardware specific handlers as well. 
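 *
 * For example, the table set up below routes an N_ALIVE notification
 * (pkt->hdr.cmd == N_ALIVE) to il3945_hdl_alive() when il3945_rx_handle()
 * looks up il->handlers[pkt->hdr.cmd].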
814 */ 815 static void 816 il3945_setup_handlers(struct il_priv *il) 817 { 818 il->handlers[N_ALIVE] = il3945_hdl_alive; 819 il->handlers[C_ADD_STA] = il3945_hdl_add_sta; 820 il->handlers[N_ERROR] = il_hdl_error; 821 il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa; 822 il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement; 823 il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep; 824 il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats; 825 il->handlers[N_BEACON] = il3945_hdl_beacon; 826 827 /* 828 * The same handler is used for both the REPLY to a discrete 829 * stats request from the host as well as for the periodic 830 * stats notifications (after received beacons) from the uCode. 831 */ 832 il->handlers[C_STATS] = il3945_hdl_c_stats; 833 il->handlers[N_STATS] = il3945_hdl_stats; 834 835 il_setup_rx_scan_handlers(il); 836 il->handlers[N_CARD_STATE] = il3945_hdl_card_state; 837 838 /* Set up hardware specific Rx handlers */ 839 il3945_hw_handler_setup(il); 840 } 841 842 /************************** RX-FUNCTIONS ****************************/ 843 /* 844 * Rx theory of operation 845 * 846 * The host allocates 32 DMA target addresses and passes the host address 847 * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is 848 * 0 to 31 849 * 850 * Rx Queue Indexes 851 * The host/firmware share two idx registers for managing the Rx buffers. 852 * 853 * The READ idx maps to the first position that the firmware may be writing 854 * to -- the driver can read up to (but not including) this position and get 855 * good data. 856 * The READ idx is managed by the firmware once the card is enabled. 857 * 858 * The WRITE idx maps to the last position the driver has read from -- the 859 * position preceding WRITE is the last slot the firmware can place a packet. 860 * 861 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 862 * WRITE = READ. 863 * 864 * During initialization, the host sets up the READ queue position to the first 865 * IDX position, and WRITE to the last (READ - 1 wrapped) 866 * 867 * When the firmware places a packet in a buffer, it will advance the READ idx 868 * and fire the RX interrupt. The driver can then query the READ idx and 869 * process as many packets as possible, moving the WRITE idx forward as it 870 * resets the Rx queue buffers with new memory. 871 * 872 * The management in the driver is as follows: 873 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 874 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 875 * to replenish the iwl->rxq->rx_free. 876 * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the 877 * iwl->rxq is replenished and the READ IDX is updated (updating the 878 * 'processed' and 'read' driver idxes as well) 879 * + A received packet is processed and handed to the kernel network stack, 880 * detached from the iwl->rxq. The driver 'processed' idx is updated. 881 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free 882 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ 883 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there 884 * were enough free buffers and RX_STALLED is set it is cleared. 885 * 886 * 887 * Driver sequence: 888 * 889 * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls 890 * il3945_rx_queue_restock 891 * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx 892 * queue, updates firmware pointers, and updates 893 * the WRITE idx. 
If insufficient rx_free buffers
 *                            are available, schedules il3945_rx_replenish
 *
 * -- enable interrupts --
 * ISR - il3945_rx()          Detach il_rx_bufs from pool up to the
 *                            READ IDX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls il3945_rx_queue_restock to refill any empty
 *                            slots.
 * ...
 *
 */

/*
 * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32
il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
{
	return cpu_to_le32((u32) dma_addr);
}

/*
 * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' idx forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void
il3945_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7) ||
	    abs(rxq->write - rxq->read) > 7) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}

/*
 * il3945_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via il3945_rx_queue_restock.
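 * (The work-queue path here allocates with GFP_KERNEL and may sleep;
 * il3945_rx_replenish_now() passes GFP_ATOMIC for the tasklet path.)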
971 * This is called as a scheduled work item (except for during initialization) 972 */ 973 static void 974 il3945_rx_allocate(struct il_priv *il, gfp_t priority) 975 { 976 struct il_rx_queue *rxq = &il->rxq; 977 struct list_head *element; 978 struct il_rx_buf *rxb; 979 struct page *page; 980 dma_addr_t page_dma; 981 unsigned long flags; 982 gfp_t gfp_mask = priority; 983 984 while (1) { 985 spin_lock_irqsave(&rxq->lock, flags); 986 if (list_empty(&rxq->rx_used)) { 987 spin_unlock_irqrestore(&rxq->lock, flags); 988 return; 989 } 990 spin_unlock_irqrestore(&rxq->lock, flags); 991 992 if (rxq->free_count > RX_LOW_WATERMARK) 993 gfp_mask |= __GFP_NOWARN; 994 995 if (il->hw_params.rx_page_order > 0) 996 gfp_mask |= __GFP_COMP; 997 998 /* Alloc a new receive buffer */ 999 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); 1000 if (!page) { 1001 if (net_ratelimit()) 1002 D_INFO("Failed to allocate SKB buffer.\n"); 1003 if (rxq->free_count <= RX_LOW_WATERMARK && 1004 net_ratelimit()) 1005 IL_ERR("Failed to allocate SKB buffer with %0x." 1006 "Only %u free buffers remaining.\n", 1007 priority, rxq->free_count); 1008 /* We don't reschedule replenish work here -- we will 1009 * call the restock method and if it still needs 1010 * more buffers it will schedule replenish */ 1011 break; 1012 } 1013 1014 /* Get physical address of RB/SKB */ 1015 page_dma = 1016 dma_map_page(&il->pci_dev->dev, page, 0, 1017 PAGE_SIZE << il->hw_params.rx_page_order, 1018 DMA_FROM_DEVICE); 1019 1020 if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) { 1021 __free_pages(page, il->hw_params.rx_page_order); 1022 break; 1023 } 1024 1025 spin_lock_irqsave(&rxq->lock, flags); 1026 1027 if (list_empty(&rxq->rx_used)) { 1028 spin_unlock_irqrestore(&rxq->lock, flags); 1029 dma_unmap_page(&il->pci_dev->dev, page_dma, 1030 PAGE_SIZE << il->hw_params.rx_page_order, 1031 DMA_FROM_DEVICE); 1032 __free_pages(page, il->hw_params.rx_page_order); 1033 return; 1034 } 1035 1036 element = rxq->rx_used.next; 1037 rxb = list_entry(element, struct il_rx_buf, list); 1038 list_del(element); 1039 1040 rxb->page = page; 1041 rxb->page_dma = page_dma; 1042 list_add_tail(&rxb->list, &rxq->rx_free); 1043 rxq->free_count++; 1044 il->alloc_rxb_page++; 1045 1046 spin_unlock_irqrestore(&rxq->lock, flags); 1047 } 1048 } 1049 1050 void 1051 il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) 1052 { 1053 unsigned long flags; 1054 int i; 1055 spin_lock_irqsave(&rxq->lock, flags); 1056 INIT_LIST_HEAD(&rxq->rx_free); 1057 INIT_LIST_HEAD(&rxq->rx_used); 1058 /* Fill the rx_used queue with _all_ of the Rx buffers */ 1059 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 1060 /* In the reset function, these buffers may have been allocated 1061 * to an SKB, so we need to unmap and free potential storage */ 1062 if (rxq->pool[i].page != NULL) { 1063 dma_unmap_page(&il->pci_dev->dev, 1064 rxq->pool[i].page_dma, 1065 PAGE_SIZE << il->hw_params.rx_page_order, 1066 DMA_FROM_DEVICE); 1067 __il_free_pages(il, rxq->pool[i].page); 1068 rxq->pool[i].page = NULL; 1069 } 1070 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1071 } 1072 1073 /* Set us so that we have processed and used all buffers, but have 1074 * not restocked the Rx queue with fresh buffers */ 1075 rxq->read = rxq->write = 0; 1076 rxq->write_actual = 0; 1077 rxq->free_count = 0; 1078 spin_unlock_irqrestore(&rxq->lock, flags); 1079 } 1080 1081 void 1082 il3945_rx_replenish(void *data) 1083 { 1084 struct il_priv *il = data; 1085 unsigned long flags; 1086 1087 
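	/* Page allocation may sleep (GFP_KERNEL), so it is done before
	 * taking il->lock; the ring restock below runs under the lock. */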
il3945_rx_allocate(il, GFP_KERNEL); 1088 1089 spin_lock_irqsave(&il->lock, flags); 1090 il3945_rx_queue_restock(il); 1091 spin_unlock_irqrestore(&il->lock, flags); 1092 } 1093 1094 static void 1095 il3945_rx_replenish_now(struct il_priv *il) 1096 { 1097 il3945_rx_allocate(il, GFP_ATOMIC); 1098 1099 il3945_rx_queue_restock(il); 1100 } 1101 1102 /* Assumes that the skb field of the buffers in 'pool' is kept accurate. 1103 * If an SKB has been detached, the POOL needs to have its SKB set to NULL 1104 * This free routine walks the list of POOL entries and if SKB is set to 1105 * non NULL it is unmapped and freed 1106 */ 1107 static void 1108 il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) 1109 { 1110 int i; 1111 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 1112 if (rxq->pool[i].page != NULL) { 1113 dma_unmap_page(&il->pci_dev->dev, 1114 rxq->pool[i].page_dma, 1115 PAGE_SIZE << il->hw_params.rx_page_order, 1116 DMA_FROM_DEVICE); 1117 __il_free_pages(il, rxq->pool[i].page); 1118 rxq->pool[i].page = NULL; 1119 } 1120 } 1121 1122 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 1123 rxq->bd_dma); 1124 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status), 1125 rxq->rb_stts, rxq->rb_stts_dma); 1126 rxq->bd = NULL; 1127 rxq->rb_stts = NULL; 1128 } 1129 1130 /* Convert linear signal-to-noise ratio into dB */ 1131 static u8 ratio2dB[100] = { 1132 /* 0 1 2 3 4 5 6 7 8 9 */ 1133 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ 1134 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ 1135 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ 1136 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ 1137 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ 1138 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ 1139 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ 1140 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ 1141 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ 1142 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ 1143 }; 1144 1145 /* Calculates a relative dB value from a ratio of linear 1146 * (i.e. not dB) signal levels. 1147 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ 1148 int 1149 il3945_calc_db_from_ratio(int sig_ratio) 1150 { 1151 /* 1000:1 or higher just report as 60 dB */ 1152 if (sig_ratio >= 1000) 1153 return 60; 1154 1155 /* 100:1 or higher, divide by 10 and use table, 1156 * add 20 dB to make up for divide by 10 */ 1157 if (sig_ratio >= 100) 1158 return 20 + (int)ratio2dB[sig_ratio / 10]; 1159 1160 /* We shouldn't see this */ 1161 if (sig_ratio < 1) 1162 return 0; 1163 1164 /* Use table for ratios 1:1 - 99:1 */ 1165 return (int)ratio2dB[sig_ratio]; 1166 } 1167 1168 /* 1169 * il3945_rx_handle - Main entry function for receiving responses from uCode 1170 * 1171 * Uses the il->handlers callback function array to invoke 1172 * the appropriate handlers, including command responses, 1173 * frame-received notifications, and other notifications. 1174 */ 1175 static void 1176 il3945_rx_handle(struct il_priv *il) 1177 { 1178 struct il_rx_buf *rxb; 1179 struct il_rx_pkt *pkt; 1180 struct il_rx_queue *rxq = &il->rxq; 1181 u32 r, i; 1182 int reclaim; 1183 unsigned long flags; 1184 u8 fill_rx = 0; 1185 u32 count = 8; 1186 int total_empty = 0; 1187 1188 /* uCode's read idx (stored in shared DRAM) indicates the last Rx 1189 * buffer that the driver may process (last buffer filled by ucode). 
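	 *
	 * A minimal sketch of the walk done below (same names as the code):
	 *
	 *	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	 *	for (i = rxq->read; i != r; i = (i + 1) & RX_QUEUE_MASK)
	 *		... handle rxq->queue[i] ...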
*/ 1190 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; 1191 i = rxq->read; 1192 1193 /* calculate total frames need to be restock after handling RX */ 1194 total_empty = r - rxq->write_actual; 1195 if (total_empty < 0) 1196 total_empty += RX_QUEUE_SIZE; 1197 1198 if (total_empty > (RX_QUEUE_SIZE / 2)) 1199 fill_rx = 1; 1200 /* Rx interrupt, but nothing sent from uCode */ 1201 if (i == r) 1202 D_RX("r = %d, i = %d\n", r, i); 1203 1204 while (i != r) { 1205 rxb = rxq->queue[i]; 1206 1207 /* If an RXB doesn't have a Rx queue slot associated with it, 1208 * then a bug has been introduced in the queue refilling 1209 * routines -- catch it here */ 1210 BUG_ON(rxb == NULL); 1211 1212 rxq->queue[i] = NULL; 1213 1214 dma_unmap_page(&il->pci_dev->dev, rxb->page_dma, 1215 PAGE_SIZE << il->hw_params.rx_page_order, 1216 DMA_FROM_DEVICE); 1217 pkt = rxb_addr(rxb); 1218 reclaim = il_need_reclaim(il, pkt); 1219 1220 /* Based on type of command response or notification, 1221 * handle those that need handling via function in 1222 * handlers table. See il3945_setup_handlers() */ 1223 if (il->handlers[pkt->hdr.cmd]) { 1224 D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i, 1225 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1226 il->isr_stats.handlers[pkt->hdr.cmd]++; 1227 il->handlers[pkt->hdr.cmd] (il, rxb); 1228 } else { 1229 /* No handling needed */ 1230 D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r, 1231 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1232 } 1233 1234 /* 1235 * XXX: After here, we should always check rxb->page 1236 * against NULL before touching it or its virtual 1237 * memory (pkt). Because some handler might have 1238 * already taken or freed the pages. 1239 */ 1240 1241 if (reclaim) { 1242 /* Invoke any callbacks, transfer the buffer to caller, 1243 * and fire off the (possibly) blocking il_send_cmd() 1244 * as we reclaim the driver command queue */ 1245 if (rxb->page) 1246 il_tx_cmd_complete(il, rxb); 1247 else 1248 IL_WARN("Claim null rxb?\n"); 1249 } 1250 1251 /* Reuse the page if possible. For notification packets and 1252 * SKBs that fail to Rx correctly, add them back into the 1253 * rx_free list for reuse later. */ 1254 spin_lock_irqsave(&rxq->lock, flags); 1255 if (rxb->page != NULL) { 1256 rxb->page_dma = 1257 dma_map_page(&il->pci_dev->dev, rxb->page, 0, 1258 PAGE_SIZE << il->hw_params.rx_page_order, 1259 DMA_FROM_DEVICE); 1260 if (unlikely(dma_mapping_error(&il->pci_dev->dev, 1261 rxb->page_dma))) { 1262 __il_free_pages(il, rxb->page); 1263 rxb->page = NULL; 1264 list_add_tail(&rxb->list, &rxq->rx_used); 1265 } else { 1266 list_add_tail(&rxb->list, &rxq->rx_free); 1267 rxq->free_count++; 1268 } 1269 } else 1270 list_add_tail(&rxb->list, &rxq->rx_used); 1271 1272 spin_unlock_irqrestore(&rxq->lock, flags); 1273 1274 i = (i + 1) & RX_QUEUE_MASK; 1275 /* If there are a lot of unused frames, 1276 * restock the Rx queue so ucode won't assert. 
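		 * (fill_rx was set above when more than half of the ring was
		 * empty; the counter below then publishes rxq->read and calls
		 * il3945_rx_replenish_now() roughly every 8 handled packets.)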
*/ 1277 if (fill_rx) { 1278 count++; 1279 if (count >= 8) { 1280 rxq->read = i; 1281 il3945_rx_replenish_now(il); 1282 count = 0; 1283 } 1284 } 1285 } 1286 1287 /* Backtrack one entry */ 1288 rxq->read = i; 1289 if (fill_rx) 1290 il3945_rx_replenish_now(il); 1291 else 1292 il3945_rx_queue_restock(il); 1293 } 1294 1295 /* call this function to flush any scheduled tasklet */ 1296 static inline void 1297 il3945_synchronize_irq(struct il_priv *il) 1298 { 1299 /* wait to make sure we flush pending tasklet */ 1300 synchronize_irq(il->pci_dev->irq); 1301 tasklet_kill(&il->irq_tasklet); 1302 } 1303 1304 static const char * 1305 il3945_desc_lookup(int i) 1306 { 1307 switch (i) { 1308 case 1: 1309 return "FAIL"; 1310 case 2: 1311 return "BAD_PARAM"; 1312 case 3: 1313 return "BAD_CHECKSUM"; 1314 case 4: 1315 return "NMI_INTERRUPT"; 1316 case 5: 1317 return "SYSASSERT"; 1318 case 6: 1319 return "FATAL_ERROR"; 1320 } 1321 1322 return "UNKNOWN"; 1323 } 1324 1325 #define ERROR_START_OFFSET (1 * sizeof(u32)) 1326 #define ERROR_ELEM_SIZE (7 * sizeof(u32)) 1327 1328 void 1329 il3945_dump_nic_error_log(struct il_priv *il) 1330 { 1331 u32 i; 1332 u32 desc, time, count, base, data1; 1333 u32 blink1, blink2, ilink1, ilink2; 1334 1335 base = le32_to_cpu(il->card_alive.error_event_table_ptr); 1336 1337 if (!il3945_hw_valid_rtc_data_addr(base)) { 1338 IL_ERR("Not valid error log pointer 0x%08X\n", base); 1339 return; 1340 } 1341 1342 count = il_read_targ_mem(il, base); 1343 1344 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 1345 IL_ERR("Start IWL Error Log Dump:\n"); 1346 IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count); 1347 } 1348 1349 IL_ERR("Desc Time asrtPC blink2 " 1350 "ilink1 nmiPC Line\n"); 1351 for (i = ERROR_START_OFFSET; 1352 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; 1353 i += ERROR_ELEM_SIZE) { 1354 desc = il_read_targ_mem(il, base + i); 1355 time = il_read_targ_mem(il, base + i + 1 * sizeof(u32)); 1356 blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32)); 1357 blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32)); 1358 ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32)); 1359 ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32)); 1360 data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32)); 1361 1362 IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1363 il3945_desc_lookup(desc), desc, time, blink1, blink2, 1364 ilink1, ilink2, data1); 1365 } 1366 } 1367 1368 static void 1369 il3945_irq_tasklet(struct tasklet_struct *t) 1370 { 1371 struct il_priv *il = from_tasklet(il, t, irq_tasklet); 1372 u32 inta, handled = 0; 1373 u32 inta_fh; 1374 unsigned long flags; 1375 #ifdef CONFIG_IWLEGACY_DEBUG 1376 u32 inta_mask; 1377 #endif 1378 1379 spin_lock_irqsave(&il->lock, flags); 1380 1381 /* Ack/clear/reset pending uCode interrupts. 1382 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1383 * and will clear only when CSR_FH_INT_STATUS gets cleared. */ 1384 inta = _il_rd(il, CSR_INT); 1385 _il_wr(il, CSR_INT, inta); 1386 1387 /* Ack/clear/reset pending flow-handler (DMA) interrupts. 1388 * Any new interrupts that happen after this, either while we're 1389 * in this tasklet, or later, will show up in next ISR/tasklet. 
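	 * (The FH status bits are folded back into 'inta' below as
	 * CSR_INT_BIT_FH_RX / CSR_INT_BIT_FH_TX, so one service pass
	 * covers both registers.)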
*/ 1390 inta_fh = _il_rd(il, CSR_FH_INT_STATUS); 1391 _il_wr(il, CSR_FH_INT_STATUS, inta_fh); 1392 1393 #ifdef CONFIG_IWLEGACY_DEBUG 1394 if (il_get_debug_level(il) & IL_DL_ISR) { 1395 /* just for debug */ 1396 inta_mask = _il_rd(il, CSR_INT_MASK); 1397 D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, 1398 inta_mask, inta_fh); 1399 } 1400 #endif 1401 1402 spin_unlock_irqrestore(&il->lock, flags); 1403 1404 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1405 * atomic, make sure that inta covers all the interrupts that 1406 * we've discovered, even if FH interrupt came in just after 1407 * reading CSR_INT. */ 1408 if (inta_fh & CSR39_FH_INT_RX_MASK) 1409 inta |= CSR_INT_BIT_FH_RX; 1410 if (inta_fh & CSR39_FH_INT_TX_MASK) 1411 inta |= CSR_INT_BIT_FH_TX; 1412 1413 /* Now service all interrupt bits discovered above. */ 1414 if (inta & CSR_INT_BIT_HW_ERR) { 1415 IL_ERR("Hardware error detected. Restarting.\n"); 1416 1417 /* Tell the device to stop sending interrupts */ 1418 il_disable_interrupts(il); 1419 1420 il->isr_stats.hw++; 1421 il_irq_handle_error(il); 1422 1423 handled |= CSR_INT_BIT_HW_ERR; 1424 1425 return; 1426 } 1427 #ifdef CONFIG_IWLEGACY_DEBUG 1428 if (il_get_debug_level(il) & (IL_DL_ISR)) { 1429 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1430 if (inta & CSR_INT_BIT_SCD) { 1431 D_ISR("Scheduler finished to transmit " 1432 "the frame/frames.\n"); 1433 il->isr_stats.sch++; 1434 } 1435 1436 /* Alive notification via Rx interrupt will do the real work */ 1437 if (inta & CSR_INT_BIT_ALIVE) { 1438 D_ISR("Alive interrupt\n"); 1439 il->isr_stats.alive++; 1440 } 1441 } 1442 #endif 1443 /* Safely ignore these bits for debug checks below */ 1444 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1445 1446 /* Error detected by uCode */ 1447 if (inta & CSR_INT_BIT_SW_ERR) { 1448 IL_ERR("Microcode SW error detected. 
" "Restarting 0x%X.\n", 1449 inta); 1450 il->isr_stats.sw++; 1451 il_irq_handle_error(il); 1452 handled |= CSR_INT_BIT_SW_ERR; 1453 } 1454 1455 /* uCode wakes up after power-down sleep */ 1456 if (inta & CSR_INT_BIT_WAKEUP) { 1457 D_ISR("Wakeup interrupt\n"); 1458 il_rx_queue_update_write_ptr(il, &il->rxq); 1459 1460 spin_lock_irqsave(&il->lock, flags); 1461 il_txq_update_write_ptr(il, &il->txq[0]); 1462 il_txq_update_write_ptr(il, &il->txq[1]); 1463 il_txq_update_write_ptr(il, &il->txq[2]); 1464 il_txq_update_write_ptr(il, &il->txq[3]); 1465 il_txq_update_write_ptr(il, &il->txq[4]); 1466 spin_unlock_irqrestore(&il->lock, flags); 1467 1468 il->isr_stats.wakeup++; 1469 handled |= CSR_INT_BIT_WAKEUP; 1470 } 1471 1472 /* All uCode command responses, including Tx command responses, 1473 * Rx "responses" (frame-received notification), and other 1474 * notifications from uCode come through here*/ 1475 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1476 il3945_rx_handle(il); 1477 il->isr_stats.rx++; 1478 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1479 } 1480 1481 if (inta & CSR_INT_BIT_FH_TX) { 1482 D_ISR("Tx interrupt\n"); 1483 il->isr_stats.tx++; 1484 1485 _il_wr(il, CSR_FH_INT_STATUS, (1 << 6)); 1486 il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0); 1487 handled |= CSR_INT_BIT_FH_TX; 1488 } 1489 1490 if (inta & ~handled) { 1491 IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled); 1492 il->isr_stats.unhandled++; 1493 } 1494 1495 if (inta & ~il->inta_mask) { 1496 IL_WARN("Disabled INTA bits 0x%08x were pending\n", 1497 inta & ~il->inta_mask); 1498 IL_WARN(" with inta_fh = 0x%08x\n", inta_fh); 1499 } 1500 1501 /* Re-enable all interrupts */ 1502 /* only Re-enable if disabled by irq */ 1503 if (test_bit(S_INT_ENABLED, &il->status)) 1504 il_enable_interrupts(il); 1505 1506 #ifdef CONFIG_IWLEGACY_DEBUG 1507 if (il_get_debug_level(il) & (IL_DL_ISR)) { 1508 inta = _il_rd(il, CSR_INT); 1509 inta_mask = _il_rd(il, CSR_INT_MASK); 1510 inta_fh = _il_rd(il, CSR_FH_INT_STATUS); 1511 D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " 1512 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1513 } 1514 #endif 1515 } 1516 1517 static int 1518 il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band, 1519 u8 is_active, u8 n_probes, 1520 struct il3945_scan_channel *scan_ch, 1521 struct ieee80211_vif *vif) 1522 { 1523 struct ieee80211_channel *chan; 1524 const struct ieee80211_supported_band *sband; 1525 const struct il_channel_info *ch_info; 1526 u16 passive_dwell = 0; 1527 u16 active_dwell = 0; 1528 int added, i; 1529 1530 sband = il_get_hw_mode(il, band); 1531 if (!sband) 1532 return 0; 1533 1534 active_dwell = il_get_active_dwell_time(il, band, n_probes); 1535 passive_dwell = il_get_passive_dwell_time(il, band, vif); 1536 1537 if (passive_dwell <= active_dwell) 1538 passive_dwell = active_dwell + 1; 1539 1540 for (i = 0, added = 0; i < il->scan_request->n_channels; i++) { 1541 chan = il->scan_request->channels[i]; 1542 1543 if (chan->band != band) 1544 continue; 1545 1546 scan_ch->channel = chan->hw_value; 1547 1548 ch_info = il_get_channel_info(il, band, scan_ch->channel); 1549 if (!il_is_channel_valid(ch_info)) { 1550 D_SCAN("Channel %d is INVALID for this band.\n", 1551 scan_ch->channel); 1552 continue; 1553 } 1554 1555 scan_ch->active_dwell = cpu_to_le16(active_dwell); 1556 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 1557 /* If passive , set up for auto-switch 1558 * and use long active_dwell time. 
		 */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_NO_IR)) {
			scan_ch->type = 0;	/* passive */
			if (IL_UCODE_API(il->ucode_ver) == 1)
				scan_ch->active_dwell =
				    cpu_to_le16(passive_dwell - 1);
		} else {
			scan_ch->type = 1;	/* active */
		}

		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes get sent right away),
		 * or for passive channels (probes get sent only after
		 * hearing a clear Rx packet). */
		if (IL_UCODE_API(il->ucode_ver) >= 2) {
			if (n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		} else {
			/* uCode v1 does not allow setting direct probe bits on
			 * passive channel. */
			if ((scan_ch->type & 1) && n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		}

		/* Set txpower levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/* scan_pwr_info->tpc.tx_gain; */
		if (band == NL80211_BAND_5GHZ)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level:
			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
			 */
		}

		D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
		       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->type & 1) ? active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}

static void
il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
{
	int i;

	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
		rates[i].bitrate = il3945_rates[i].ieee * 5;
		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
		rates[i].hw_value_short = i;
		rates[i].flags = 0;
		if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
			/*
			 * If CCK != 1M then set short preamble rate flag.
			 */
			rates[i].flags |=
			    (il3945_rates[i].plcp ==
			     10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
		}
	}
}

/******************************************************************************
 *
 * uCode download functions
 *
 ******************************************************************************/

static void
il3945_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}

/*
 * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
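 * Used only as a fall-back from il3945_verify_ucode() when the sparse
 * check fails for every image, so the mismatching words can be inspected.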
1652 */ 1653 static int 1654 il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len) 1655 { 1656 u32 val; 1657 u32 save_len = len; 1658 int rc = 0; 1659 u32 errcnt; 1660 1661 D_INFO("ucode inst image size is %u\n", len); 1662 1663 il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND); 1664 1665 errcnt = 0; 1666 for (; len > 0; len -= sizeof(u32), image++) { 1667 /* read data comes through single port, auto-incr addr */ 1668 /* NOTE: Use the debugless read so we don't flood kernel log 1669 * if IL_DL_IO is set */ 1670 val = _il_rd(il, HBUS_TARG_MEM_RDAT); 1671 if (val != le32_to_cpu(*image)) { 1672 IL_ERR("uCode INST section is invalid at " 1673 "offset 0x%x, is 0x%x, s/b 0x%x\n", 1674 save_len - len, val, le32_to_cpu(*image)); 1675 rc = -EIO; 1676 errcnt++; 1677 if (errcnt >= 20) 1678 break; 1679 } 1680 } 1681 1682 if (!errcnt) 1683 D_INFO("ucode image in INSTRUCTION memory is good\n"); 1684 1685 return rc; 1686 } 1687 1688 /* 1689 * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host, 1690 * using sample data 100 bytes apart. If these sample points are good, 1691 * it's a pretty good bet that everything between them is good, too. 1692 */ 1693 static int 1694 il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len) 1695 { 1696 u32 val; 1697 int rc = 0; 1698 u32 errcnt = 0; 1699 u32 i; 1700 1701 D_INFO("ucode inst image size is %u\n", len); 1702 1703 for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) { 1704 /* read data comes through single port, auto-incr addr */ 1705 /* NOTE: Use the debugless read so we don't flood kernel log 1706 * if IL_DL_IO is set */ 1707 il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND); 1708 val = _il_rd(il, HBUS_TARG_MEM_RDAT); 1709 if (val != le32_to_cpu(*image)) { 1710 #if 0 /* Enable this if you want to see details */ 1711 IL_ERR("uCode INST section is invalid at " 1712 "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val, 1713 *image); 1714 #endif 1715 rc = -EIO; 1716 errcnt++; 1717 if (errcnt >= 3) 1718 break; 1719 } 1720 } 1721 1722 return rc; 1723 } 1724 1725 /* 1726 * il3945_verify_ucode - determine which instruction image is in SRAM, 1727 * and verify its contents 1728 */ 1729 static int 1730 il3945_verify_ucode(struct il_priv *il) 1731 { 1732 __le32 *image; 1733 u32 len; 1734 int rc = 0; 1735 1736 /* Try bootstrap */ 1737 image = (__le32 *) il->ucode_boot.v_addr; 1738 len = il->ucode_boot.len; 1739 rc = il3945_verify_inst_sparse(il, image, len); 1740 if (rc == 0) { 1741 D_INFO("Bootstrap uCode is good in inst SRAM\n"); 1742 return 0; 1743 } 1744 1745 /* Try initialize */ 1746 image = (__le32 *) il->ucode_init.v_addr; 1747 len = il->ucode_init.len; 1748 rc = il3945_verify_inst_sparse(il, image, len); 1749 if (rc == 0) { 1750 D_INFO("Initialize uCode is good in inst SRAM\n"); 1751 return 0; 1752 } 1753 1754 /* Try runtime/protocol */ 1755 image = (__le32 *) il->ucode_code.v_addr; 1756 len = il->ucode_code.len; 1757 rc = il3945_verify_inst_sparse(il, image, len); 1758 if (rc == 0) { 1759 D_INFO("Runtime uCode is good in inst SRAM\n"); 1760 return 0; 1761 } 1762 1763 IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); 1764 1765 /* Since nothing seems to match, show first several data entries in 1766 * instruction SRAM, so maybe visual inspection will give a clue. 1767 * Selection of bootstrap image (vs. other images) is arbitrary. 
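 * (The sparse pass above samples one word every 100 bytes; the bootstrap,
 * initialize and runtime images were tried in that order.)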
*/ 1768 image = (__le32 *) il->ucode_boot.v_addr; 1769 len = il->ucode_boot.len; 1770 rc = il3945_verify_inst_full(il, image, len); 1771 1772 return rc; 1773 } 1774 1775 static void 1776 il3945_nic_start(struct il_priv *il) 1777 { 1778 /* Remove all resets to allow NIC to operate */ 1779 _il_wr(il, CSR_RESET, 0); 1780 } 1781 1782 #define IL3945_UCODE_GET(item) \ 1783 static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\ 1784 { \ 1785 return le32_to_cpu(ucode->v1.item); \ 1786 } 1787 1788 static u32 1789 il3945_ucode_get_header_size(u32 api_ver) 1790 { 1791 return 24; 1792 } 1793 1794 static u8 * 1795 il3945_ucode_get_data(const struct il_ucode_header *ucode) 1796 { 1797 return (u8 *) ucode->v1.data; 1798 } 1799 1800 IL3945_UCODE_GET(inst_size); 1801 IL3945_UCODE_GET(data_size); 1802 IL3945_UCODE_GET(init_size); 1803 IL3945_UCODE_GET(init_data_size); 1804 IL3945_UCODE_GET(boot_size); 1805 1806 /* 1807 * il3945_read_ucode - Read uCode images from disk file. 1808 * 1809 * Copy into buffers for card to fetch via bus-mastering 1810 */ 1811 static int 1812 il3945_read_ucode(struct il_priv *il) 1813 { 1814 const struct il_ucode_header *ucode; 1815 int ret = -EINVAL, idx; 1816 const struct firmware *ucode_raw; 1817 /* firmware file name contains uCode/driver compatibility version */ 1818 const char *name_pre = il->cfg->fw_name_pre; 1819 const unsigned int api_max = il->cfg->ucode_api_max; 1820 const unsigned int api_min = il->cfg->ucode_api_min; 1821 char buf[25]; 1822 u8 *src; 1823 size_t len; 1824 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; 1825 1826 /* Ask kernel firmware_class module to get the boot firmware off disk. 1827 * request_firmware() is synchronous, file is in memory on return. */ 1828 for (idx = api_max; idx >= api_min; idx--) { 1829 sprintf(buf, "%s%u%s", name_pre, idx, ".ucode"); 1830 ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev); 1831 if (ret < 0) { 1832 IL_ERR("%s firmware file req failed: %d\n", buf, ret); 1833 if (ret == -ENOENT) 1834 continue; 1835 else 1836 goto error; 1837 } else { 1838 if (idx < api_max) 1839 IL_ERR("Loaded firmware %s, " 1840 "which is deprecated. " 1841 " Please use API v%u instead.\n", buf, 1842 api_max); 1843 D_INFO("Got firmware '%s' file " 1844 "(%zd bytes) from disk\n", buf, ucode_raw->size); 1845 break; 1846 } 1847 } 1848 1849 if (ret < 0) 1850 goto error; 1851 1852 /* Make sure that we got at least our header! */ 1853 if (ucode_raw->size < il3945_ucode_get_header_size(1)) { 1854 IL_ERR("File size way too small!\n"); 1855 ret = -EINVAL; 1856 goto err_release; 1857 } 1858 1859 /* Data from ucode file: header followed by uCode images */ 1860 ucode = (struct il_ucode_header *)ucode_raw->data; 1861 1862 il->ucode_ver = le32_to_cpu(ucode->ver); 1863 api_ver = IL_UCODE_API(il->ucode_ver); 1864 inst_size = il3945_ucode_get_inst_size(ucode); 1865 data_size = il3945_ucode_get_data_size(ucode); 1866 init_size = il3945_ucode_get_init_size(ucode); 1867 init_data_size = il3945_ucode_get_init_data_size(ucode); 1868 boot_size = il3945_ucode_get_boot_size(ucode); 1869 src = il3945_ucode_get_data(ucode); 1870 1871 /* api_ver should match the api version forming part of the 1872 * firmware filename ... but we don't check for that and only rely 1873 * on the API version read from firmware header from here on forward */ 1874 1875 if (api_ver < api_min || api_ver > api_max) { 1876 IL_ERR("Driver unable to support your firmware API. 
" 1877 "Driver supports v%u, firmware is v%u.\n", api_max, 1878 api_ver); 1879 il->ucode_ver = 0; 1880 ret = -EINVAL; 1881 goto err_release; 1882 } 1883 if (api_ver != api_max) 1884 IL_ERR("Firmware has old API version. Expected %u, " 1885 "got %u. New firmware can be obtained " 1886 "from http://www.intellinuxwireless.org.\n", api_max, 1887 api_ver); 1888 1889 IL_INFO("loaded firmware version %u.%u.%u.%u\n", 1890 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), 1891 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); 1892 1893 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), 1894 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), 1895 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), 1896 IL_UCODE_SERIAL(il->ucode_ver)); 1897 1898 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); 1899 D_INFO("f/w package hdr runtime inst size = %u\n", inst_size); 1900 D_INFO("f/w package hdr runtime data size = %u\n", data_size); 1901 D_INFO("f/w package hdr init inst size = %u\n", init_size); 1902 D_INFO("f/w package hdr init data size = %u\n", init_data_size); 1903 D_INFO("f/w package hdr boot inst size = %u\n", boot_size); 1904 1905 /* Verify size of file vs. image size info in file's header */ 1906 if (ucode_raw->size != 1907 il3945_ucode_get_header_size(api_ver) + inst_size + data_size + 1908 init_size + init_data_size + boot_size) { 1909 1910 D_INFO("uCode file size %zd does not match expected size\n", 1911 ucode_raw->size); 1912 ret = -EINVAL; 1913 goto err_release; 1914 } 1915 1916 /* Verify that uCode images will fit in card's SRAM */ 1917 if (inst_size > IL39_MAX_INST_SIZE) { 1918 D_INFO("uCode instr len %d too large to fit in\n", inst_size); 1919 ret = -EINVAL; 1920 goto err_release; 1921 } 1922 1923 if (data_size > IL39_MAX_DATA_SIZE) { 1924 D_INFO("uCode data len %d too large to fit in\n", data_size); 1925 ret = -EINVAL; 1926 goto err_release; 1927 } 1928 if (init_size > IL39_MAX_INST_SIZE) { 1929 D_INFO("uCode init instr len %d too large to fit in\n", 1930 init_size); 1931 ret = -EINVAL; 1932 goto err_release; 1933 } 1934 if (init_data_size > IL39_MAX_DATA_SIZE) { 1935 D_INFO("uCode init data len %d too large to fit in\n", 1936 init_data_size); 1937 ret = -EINVAL; 1938 goto err_release; 1939 } 1940 if (boot_size > IL39_MAX_BSM_SIZE) { 1941 D_INFO("uCode boot instr len %d too large to fit in\n", 1942 boot_size); 1943 ret = -EINVAL; 1944 goto err_release; 1945 } 1946 1947 /* Allocate ucode buffers for card's bus-master loading ... 
*/ 1948 1949 /* Runtime instructions and 2 copies of data: 1950 * 1) unmodified from disk 1951 * 2) backup cache for save/restore during power-downs */ 1952 il->ucode_code.len = inst_size; 1953 il_alloc_fw_desc(il->pci_dev, &il->ucode_code); 1954 1955 il->ucode_data.len = data_size; 1956 il_alloc_fw_desc(il->pci_dev, &il->ucode_data); 1957 1958 il->ucode_data_backup.len = data_size; 1959 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); 1960 1961 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || 1962 !il->ucode_data_backup.v_addr) 1963 goto err_pci_alloc; 1964 1965 /* Initialization instructions and data */ 1966 if (init_size && init_data_size) { 1967 il->ucode_init.len = init_size; 1968 il_alloc_fw_desc(il->pci_dev, &il->ucode_init); 1969 1970 il->ucode_init_data.len = init_data_size; 1971 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); 1972 1973 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) 1974 goto err_pci_alloc; 1975 } 1976 1977 /* Bootstrap (instructions only, no data) */ 1978 if (boot_size) { 1979 il->ucode_boot.len = boot_size; 1980 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); 1981 1982 if (!il->ucode_boot.v_addr) 1983 goto err_pci_alloc; 1984 } 1985 1986 /* Copy images into buffers for card's bus-master reads ... */ 1987 1988 /* Runtime instructions (first block of data in file) */ 1989 len = inst_size; 1990 D_INFO("Copying (but not loading) uCode instr len %zd\n", len); 1991 memcpy(il->ucode_code.v_addr, src, len); 1992 src += len; 1993 1994 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", 1995 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); 1996 1997 /* Runtime data (2nd block) 1998 * NOTE: Copy into backup buffer will be done in il3945_up() */ 1999 len = data_size; 2000 D_INFO("Copying (but not loading) uCode data len %zd\n", len); 2001 memcpy(il->ucode_data.v_addr, src, len); 2002 memcpy(il->ucode_data_backup.v_addr, src, len); 2003 src += len; 2004 2005 /* Initialization instructions (3rd block) */ 2006 if (init_size) { 2007 len = init_size; 2008 D_INFO("Copying (but not loading) init instr len %zd\n", len); 2009 memcpy(il->ucode_init.v_addr, src, len); 2010 src += len; 2011 } 2012 2013 /* Initialization data (4th block) */ 2014 if (init_data_size) { 2015 len = init_data_size; 2016 D_INFO("Copying (but not loading) init data len %zd\n", len); 2017 memcpy(il->ucode_init_data.v_addr, src, len); 2018 src += len; 2019 } 2020 2021 /* Bootstrap instructions (5th block) */ 2022 len = boot_size; 2023 D_INFO("Copying (but not loading) boot instr len %zd\n", len); 2024 memcpy(il->ucode_boot.v_addr, src, len); 2025 2026 /* We have our copies now, allow OS release its copies */ 2027 release_firmware(ucode_raw); 2028 return 0; 2029 2030 err_pci_alloc: 2031 IL_ERR("failed to allocate pci memory\n"); 2032 ret = -ENOMEM; 2033 il3945_dealloc_ucode_pci(il); 2034 2035 err_release: 2036 release_firmware(ucode_raw); 2037 2038 error: 2039 return ret; 2040 } 2041 2042 /* 2043 * il3945_set_ucode_ptrs - Set uCode address location 2044 * 2045 * Tell initialization uCode where to find runtime uCode. 2046 * 2047 * BSM registers initially contain pointers to initialization uCode. 2048 * We need to replace them to load runtime uCode inst and data, 2049 * and to save runtime data when powering down. 
2050 */ 2051 static int 2052 il3945_set_ucode_ptrs(struct il_priv *il) 2053 { 2054 dma_addr_t pinst; 2055 dma_addr_t pdata; 2056 2057 /* bits 31:0 for 3945 */ 2058 pinst = il->ucode_code.p_addr; 2059 pdata = il->ucode_data_backup.p_addr; 2060 2061 /* Tell bootstrap uCode where to find image to load */ 2062 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); 2063 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); 2064 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len); 2065 2066 /* Inst byte count must be last to set up, bit 31 signals uCode 2067 * that all new ptr/size info is in place */ 2068 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, 2069 il->ucode_code.len | BSM_DRAM_INST_LOAD); 2070 2071 D_INFO("Runtime uCode pointers are set.\n"); 2072 2073 return 0; 2074 } 2075 2076 /* 2077 * il3945_init_alive_start - Called after N_ALIVE notification received 2078 * 2079 * Called after N_ALIVE notification received from "initialize" uCode. 2080 * 2081 * Tell "initialize" uCode to go ahead and load the runtime uCode. 2082 */ 2083 static void 2084 il3945_init_alive_start(struct il_priv *il) 2085 { 2086 /* Check alive response for "valid" sign from uCode */ 2087 if (il->card_alive_init.is_valid != UCODE_VALID_OK) { 2088 /* We had an error bringing up the hardware, so take it 2089 * all the way back down so we can try again */ 2090 D_INFO("Initialize Alive failed.\n"); 2091 goto restart; 2092 } 2093 2094 /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 2095 * This is a paranoid check, because we would not have gotten the 2096 * "initialize" alive if code weren't properly loaded. */ 2097 if (il3945_verify_ucode(il)) { 2098 /* Runtime instruction load was bad; 2099 * take it all the way back down so we can try again */ 2100 D_INFO("Bad \"initialize\" uCode load.\n"); 2101 goto restart; 2102 } 2103 2104 /* Send pointers to protocol/runtime uCode image ... init code will 2105 * load and launch runtime uCode, which will send us another "Alive" 2106 * notification. */ 2107 D_INFO("Initialization Alive received.\n"); 2108 if (il3945_set_ucode_ptrs(il)) { 2109 /* Runtime instruction load won't happen; 2110 * take it all the way back down so we can try again */ 2111 D_INFO("Couldn't set up uCode pointers.\n"); 2112 goto restart; 2113 } 2114 return; 2115 2116 restart: 2117 queue_work(il->workqueue, &il->restart); 2118 } 2119 2120 /* 2121 * il3945_alive_start - called after N_ALIVE notification received 2122 * from protocol/runtime uCode (initialization uCode's 2123 * Alive gets handled by il3945_init_alive_start()). 2124 */ 2125 static void 2126 il3945_alive_start(struct il_priv *il) 2127 { 2128 int thermal_spin = 0; 2129 u32 rfkill; 2130 2131 D_INFO("Runtime Alive received.\n"); 2132 2133 if (il->card_alive.is_valid != UCODE_VALID_OK) { 2134 /* We had an error bringing up the hardware, so take it 2135 * all the way back down so we can try again */ 2136 D_INFO("Alive failed.\n"); 2137 goto restart; 2138 } 2139 2140 /* Initialize uCode has loaded Runtime uCode ... verify inst image. 2141 * This is a paranoid check, because we would not have gotten the 2142 * "runtime" alive if code weren't properly loaded. 
*/ 2143 if (il3945_verify_ucode(il)) { 2144 /* Runtime instruction load was bad; 2145 * take it all the way back down so we can try again */ 2146 D_INFO("Bad runtime uCode load.\n"); 2147 goto restart; 2148 } 2149 2150 rfkill = il_rd_prph(il, APMG_RFKILL_REG); 2151 D_INFO("RFKILL status: 0x%x\n", rfkill); 2152 2153 if (rfkill & 0x1) { 2154 clear_bit(S_RFKILL, &il->status); 2155 /* if RFKILL is not on, then wait for thermal 2156 * sensor in adapter to kick in */ 2157 while (il3945_hw_get_temperature(il) == 0) { 2158 thermal_spin++; 2159 udelay(10); 2160 } 2161 2162 if (thermal_spin) 2163 D_INFO("Thermal calibration took %dus\n", 2164 thermal_spin * 10); 2165 } else 2166 set_bit(S_RFKILL, &il->status); 2167 2168 /* After the ALIVE response, we can send commands to 3945 uCode */ 2169 set_bit(S_ALIVE, &il->status); 2170 2171 /* Enable watchdog to monitor the driver tx queues */ 2172 il_setup_watchdog(il); 2173 2174 if (il_is_rfkill(il)) 2175 return; 2176 2177 ieee80211_wake_queues(il->hw); 2178 2179 il->active_rate = RATES_MASK_3945; 2180 2181 il_power_update_mode(il, true); 2182 2183 if (il_is_associated(il)) { 2184 struct il3945_rxon_cmd *active_rxon = 2185 (struct il3945_rxon_cmd *)(&il->active); 2186 2187 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2188 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2189 } else { 2190 /* Initialize our rx_config data */ 2191 il_connection_init_rx_config(il); 2192 } 2193 2194 /* Configure Bluetooth device coexistence support */ 2195 il_send_bt_config(il); 2196 2197 set_bit(S_READY, &il->status); 2198 2199 /* Configure the adapter for unassociated operation */ 2200 il3945_commit_rxon(il); 2201 2202 il3945_reg_txpower_periodic(il); 2203 2204 D_INFO("ALIVE processing complete.\n"); 2205 wake_up(&il->wait_command_queue); 2206 2207 return; 2208 2209 restart: 2210 queue_work(il->workqueue, &il->restart); 2211 } 2212 2213 static void il3945_cancel_deferred_work(struct il_priv *il); 2214 2215 static void 2216 __il3945_down(struct il_priv *il) 2217 { 2218 unsigned long flags; 2219 int exit_pending; 2220 2221 D_INFO(DRV_NAME " is going down\n"); 2222 2223 il_scan_cancel_timeout(il, 200); 2224 2225 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status); 2226 2227 /* Stop TX queues watchdog. 
We need to have S_EXIT_PENDING bit set 2228 * to prevent rearm timer */ 2229 del_timer_sync(&il->watchdog); 2230 2231 /* Station information will now be cleared in device */ 2232 il_clear_ucode_stations(il); 2233 il_dealloc_bcast_stations(il); 2234 il_clear_driver_stations(il); 2235 2236 /* Unblock any waiting calls */ 2237 wake_up_all(&il->wait_command_queue); 2238 2239 /* Wipe out the EXIT_PENDING status bit if we are not actually 2240 * exiting the module */ 2241 if (!exit_pending) 2242 clear_bit(S_EXIT_PENDING, &il->status); 2243 2244 /* stop and reset the on-board processor */ 2245 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 2246 2247 /* tell the device to stop sending interrupts */ 2248 spin_lock_irqsave(&il->lock, flags); 2249 il_disable_interrupts(il); 2250 spin_unlock_irqrestore(&il->lock, flags); 2251 il3945_synchronize_irq(il); 2252 2253 if (il->mac80211_registered) 2254 ieee80211_stop_queues(il->hw); 2255 2256 /* If we have not previously called il3945_init() then 2257 * clear all bits but the RF Kill bits and return */ 2258 if (!il_is_init(il)) { 2259 il->status = 2260 test_bit(S_RFKILL, &il->status) << S_RFKILL | 2261 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED | 2262 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; 2263 goto exit; 2264 } 2265 2266 /* ...otherwise clear out all the status bits but the RF Kill 2267 * bit and continue taking the NIC down. */ 2268 il->status &= 2269 test_bit(S_RFKILL, &il->status) << S_RFKILL | 2270 test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED | 2271 test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR | 2272 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING; 2273 2274 /* 2275 * We disabled and synchronized interrupt, and priv->mutex is taken, so 2276 * here is the only thread which will program device registers, but 2277 * still have lockdep assertions, so we are taking reg_lock. 2278 */ 2279 spin_lock_irq(&il->reg_lock); 2280 /* FIXME: il_grab_nic_access if rfkill is off ? 
*/ 2281 2282 il3945_hw_txq_ctx_stop(il); 2283 il3945_hw_rxq_stop(il); 2284 /* Power-down device's busmaster DMA clocks */ 2285 _il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); 2286 udelay(5); 2287 /* Stop the device, and put it in low power state */ 2288 _il_apm_stop(il); 2289 2290 spin_unlock_irq(&il->reg_lock); 2291 2292 il3945_hw_txq_ctx_free(il); 2293 exit: 2294 memset(&il->card_alive, 0, sizeof(struct il_alive_resp)); 2295 dev_kfree_skb(il->beacon_skb); 2296 il->beacon_skb = NULL; 2297 2298 /* clear out any free frames */ 2299 il3945_clear_free_frames(il); 2300 } 2301 2302 static void 2303 il3945_down(struct il_priv *il) 2304 { 2305 mutex_lock(&il->mutex); 2306 __il3945_down(il); 2307 mutex_unlock(&il->mutex); 2308 2309 il3945_cancel_deferred_work(il); 2310 } 2311 2312 #define MAX_HW_RESTARTS 5 2313 2314 static int 2315 il3945_alloc_bcast_station(struct il_priv *il) 2316 { 2317 unsigned long flags; 2318 u8 sta_id; 2319 2320 spin_lock_irqsave(&il->sta_lock, flags); 2321 sta_id = il_prep_station(il, il_bcast_addr, false, NULL); 2322 if (sta_id == IL_INVALID_STATION) { 2323 IL_ERR("Unable to prepare broadcast station\n"); 2324 spin_unlock_irqrestore(&il->sta_lock, flags); 2325 2326 return -EINVAL; 2327 } 2328 2329 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE; 2330 il->stations[sta_id].used |= IL_STA_BCAST; 2331 spin_unlock_irqrestore(&il->sta_lock, flags); 2332 2333 return 0; 2334 } 2335 2336 static int 2337 __il3945_up(struct il_priv *il) 2338 { 2339 int rc, i; 2340 2341 rc = il3945_alloc_bcast_station(il); 2342 if (rc) 2343 return rc; 2344 2345 if (test_bit(S_EXIT_PENDING, &il->status)) { 2346 IL_WARN("Exit pending; will not bring the NIC up\n"); 2347 return -EIO; 2348 } 2349 2350 if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) { 2351 IL_ERR("ucode not available for device bring up\n"); 2352 return -EIO; 2353 } 2354 2355 /* If platform's RF_KILL switch is NOT set to KILL */ 2356 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2357 clear_bit(S_RFKILL, &il->status); 2358 else { 2359 set_bit(S_RFKILL, &il->status); 2360 return -ERFKILL; 2361 } 2362 2363 _il_wr(il, CSR_INT, 0xFFFFFFFF); 2364 2365 rc = il3945_hw_nic_init(il); 2366 if (rc) { 2367 IL_ERR("Unable to int nic\n"); 2368 return rc; 2369 } 2370 2371 /* make sure rfkill handshake bits are cleared */ 2372 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2373 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2374 2375 /* clear (again), then enable host interrupts */ 2376 _il_wr(il, CSR_INT, 0xFFFFFFFF); 2377 il_enable_interrupts(il); 2378 2379 /* really make sure rfkill handshake bits are cleared */ 2380 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2381 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2382 2383 /* Copy original ucode data image from disk into backup cache. 2384 * This will be used to initialize the on-board processor's 2385 * data SRAM for a clean start when the runtime program first loads. */ 2386 memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr, 2387 il->ucode_data.len); 2388 2389 /* We return success when we resume from suspend and rf_kill is on. 
*/ 2390 if (test_bit(S_RFKILL, &il->status)) 2391 return 0; 2392 2393 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2394 2395 /* load bootstrap state machine, 2396 * load bootstrap program into processor's memory, 2397 * prepare to load the "initialize" uCode */ 2398 rc = il->ops->load_ucode(il); 2399 2400 if (rc) { 2401 IL_ERR("Unable to set up bootstrap uCode: %d\n", rc); 2402 continue; 2403 } 2404 2405 /* start card; "initialize" will load runtime ucode */ 2406 il3945_nic_start(il); 2407 2408 D_INFO(DRV_NAME " is coming up\n"); 2409 2410 return 0; 2411 } 2412 2413 set_bit(S_EXIT_PENDING, &il->status); 2414 __il3945_down(il); 2415 clear_bit(S_EXIT_PENDING, &il->status); 2416 2417 /* tried to restart and config the device for as long as our 2418 * patience could withstand */ 2419 IL_ERR("Unable to initialize device after %d attempts.\n", i); 2420 return -EIO; 2421 } 2422 2423 /***************************************************************************** 2424 * 2425 * Workqueue callbacks 2426 * 2427 *****************************************************************************/ 2428 2429 static void 2430 il3945_bg_init_alive_start(struct work_struct *data) 2431 { 2432 struct il_priv *il = 2433 container_of(data, struct il_priv, init_alive_start.work); 2434 2435 mutex_lock(&il->mutex); 2436 if (test_bit(S_EXIT_PENDING, &il->status)) 2437 goto out; 2438 2439 il3945_init_alive_start(il); 2440 out: 2441 mutex_unlock(&il->mutex); 2442 } 2443 2444 static void 2445 il3945_bg_alive_start(struct work_struct *data) 2446 { 2447 struct il_priv *il = 2448 container_of(data, struct il_priv, alive_start.work); 2449 2450 mutex_lock(&il->mutex); 2451 if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL) 2452 goto out; 2453 2454 il3945_alive_start(il); 2455 out: 2456 mutex_unlock(&il->mutex); 2457 } 2458 2459 /* 2460 * 3945 cannot interrupt driver when hardware rf kill switch toggles; 2461 * driver must poll CSR_GP_CNTRL_REG register for change. This register 2462 * *is* readable even when device has been SW_RESET into low power mode 2463 * (e.g. during RF KILL). 2464 */ 2465 static void 2466 il3945_rfkill_poll(struct work_struct *data) 2467 { 2468 struct il_priv *il = 2469 container_of(data, struct il_priv, _3945.rfkill_poll.work); 2470 bool old_rfkill = test_bit(S_RFKILL, &il->status); 2471 bool new_rfkill = 2472 !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 2473 2474 if (new_rfkill != old_rfkill) { 2475 if (new_rfkill) 2476 set_bit(S_RFKILL, &il->status); 2477 else 2478 clear_bit(S_RFKILL, &il->status); 2479 2480 wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill); 2481 2482 D_RF_KILL("RF_KILL bit toggled to %s.\n", 2483 new_rfkill ? "disable radio" : "enable radio"); 2484 } 2485 2486 /* Keep this running, even if radio now enabled. 
This will be 2487 * cancelled in mac_start() if system decides to start again */ 2488 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2489 round_jiffies_relative(2 * HZ)); 2490 2491 } 2492 2493 int 2494 il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif) 2495 { 2496 struct il_host_cmd cmd = { 2497 .id = C_SCAN, 2498 .len = sizeof(struct il3945_scan_cmd), 2499 .flags = CMD_SIZE_HUGE, 2500 }; 2501 struct il3945_scan_cmd *scan; 2502 u8 n_probes = 0; 2503 enum nl80211_band band; 2504 bool is_active = false; 2505 int ret; 2506 u16 len; 2507 2508 lockdep_assert_held(&il->mutex); 2509 2510 if (!il->scan_cmd) { 2511 il->scan_cmd = 2512 kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE, 2513 GFP_KERNEL); 2514 if (!il->scan_cmd) { 2515 D_SCAN("Fail to allocate scan memory\n"); 2516 return -ENOMEM; 2517 } 2518 } 2519 scan = il->scan_cmd; 2520 memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE); 2521 2522 scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH; 2523 scan->quiet_time = IL_ACTIVE_QUIET_TIME; 2524 2525 if (il_is_associated(il)) { 2526 u16 interval; 2527 u32 extra; 2528 u32 suspend_time = 100; 2529 u32 scan_suspend_time = 100; 2530 2531 D_INFO("Scanning while associated...\n"); 2532 2533 interval = vif->bss_conf.beacon_int; 2534 2535 scan->suspend_time = 0; 2536 scan->max_out_time = cpu_to_le32(200 * 1024); 2537 if (!interval) 2538 interval = suspend_time; 2539 /* 2540 * suspend time format: 2541 * 0-19: beacon interval in usec (time before exec.) 2542 * 20-23: 0 2543 * 24-31: number of beacons (suspend between channels) 2544 */ 2545 2546 extra = (suspend_time / interval) << 24; 2547 scan_suspend_time = 2548 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024)); 2549 2550 scan->suspend_time = cpu_to_le32(scan_suspend_time); 2551 D_SCAN("suspend_time 0x%X beacon interval %d\n", 2552 scan_suspend_time, interval); 2553 } 2554 2555 if (il->scan_request->n_ssids) { 2556 int i, p = 0; 2557 D_SCAN("Kicking off active scan\n"); 2558 for (i = 0; i < il->scan_request->n_ssids; i++) { 2559 /* always does wildcard anyway */ 2560 if (!il->scan_request->ssids[i].ssid_len) 2561 continue; 2562 scan->direct_scan[p].id = WLAN_EID_SSID; 2563 scan->direct_scan[p].len = 2564 il->scan_request->ssids[i].ssid_len; 2565 memcpy(scan->direct_scan[p].ssid, 2566 il->scan_request->ssids[i].ssid, 2567 il->scan_request->ssids[i].ssid_len); 2568 n_probes++; 2569 p++; 2570 } 2571 is_active = true; 2572 } else 2573 D_SCAN("Kicking off passive scan.\n"); 2574 2575 /* We don't build a direct scan probe request; the uCode will do 2576 * that based on the direct_mask added to each channel entry */ 2577 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 2578 scan->tx_cmd.sta_id = il->hw_params.bcast_id; 2579 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2580 2581 /* flags + rate selection */ 2582 2583 switch (il->scan_band) { 2584 case NL80211_BAND_2GHZ: 2585 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 2586 scan->tx_cmd.rate = RATE_1M_PLCP; 2587 band = NL80211_BAND_2GHZ; 2588 break; 2589 case NL80211_BAND_5GHZ: 2590 scan->tx_cmd.rate = RATE_6M_PLCP; 2591 band = NL80211_BAND_5GHZ; 2592 break; 2593 default: 2594 IL_WARN("Invalid scan band\n"); 2595 return -EIO; 2596 } 2597 2598 /* 2599 * If active scanning is requested but a certain channel is marked 2600 * passive, we can do active scanning if we detect transmissions. For 2601 * passive-only scanning, disable switching to active on any channel. 2602 */ 2603 scan->good_CRC_th = 2604 is_active ?
IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER; 2605 2606 len = 2607 il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data, 2608 vif->addr, il->scan_request->ie, 2609 il->scan_request->ie_len, 2610 IL_MAX_SCAN_SIZE - sizeof(*scan)); 2611 scan->tx_cmd.len = cpu_to_le16(len); 2612 2613 /* select Rx antennas */ 2614 scan->flags |= il3945_get_antenna_flags(il); 2615 2616 scan->channel_count = 2617 il3945_get_channels_for_scan(il, band, is_active, n_probes, 2618 (void *)&scan->data[len], vif); 2619 if (scan->channel_count == 0) { 2620 D_SCAN("channel count %d\n", scan->channel_count); 2621 return -EIO; 2622 } 2623 2624 cmd.len += 2625 le16_to_cpu(scan->tx_cmd.len) + 2626 scan->channel_count * sizeof(struct il3945_scan_channel); 2627 cmd.data = scan; 2628 scan->len = cpu_to_le16(cmd.len); 2629 2630 set_bit(S_SCAN_HW, &il->status); 2631 ret = il_send_cmd_sync(il, &cmd); 2632 if (ret) 2633 clear_bit(S_SCAN_HW, &il->status); 2634 return ret; 2635 } 2636 2637 void 2638 il3945_post_scan(struct il_priv *il) 2639 { 2640 /* 2641 * Since setting the RXON may have been deferred while 2642 * performing the scan, fire one off if needed 2643 */ 2644 if (memcmp(&il->staging, &il->active, sizeof(il->staging))) 2645 il3945_commit_rxon(il); 2646 } 2647 2648 static void 2649 il3945_bg_restart(struct work_struct *data) 2650 { 2651 struct il_priv *il = container_of(data, struct il_priv, restart); 2652 2653 if (test_bit(S_EXIT_PENDING, &il->status)) 2654 return; 2655 2656 if (test_and_clear_bit(S_FW_ERROR, &il->status)) { 2657 mutex_lock(&il->mutex); 2658 il->is_open = 0; 2659 mutex_unlock(&il->mutex); 2660 il3945_down(il); 2661 ieee80211_restart_hw(il->hw); 2662 } else { 2663 il3945_down(il); 2664 2665 mutex_lock(&il->mutex); 2666 if (test_bit(S_EXIT_PENDING, &il->status)) { 2667 mutex_unlock(&il->mutex); 2668 return; 2669 } 2670 2671 __il3945_up(il); 2672 mutex_unlock(&il->mutex); 2673 } 2674 } 2675 2676 static void 2677 il3945_bg_rx_replenish(struct work_struct *data) 2678 { 2679 struct il_priv *il = container_of(data, struct il_priv, rx_replenish); 2680 2681 mutex_lock(&il->mutex); 2682 if (test_bit(S_EXIT_PENDING, &il->status)) 2683 goto out; 2684 2685 il3945_rx_replenish(il); 2686 out: 2687 mutex_unlock(&il->mutex); 2688 } 2689 2690 void 2691 il3945_post_associate(struct il_priv *il) 2692 { 2693 int rc = 0; 2694 2695 if (!il->vif || !il->is_open) 2696 return; 2697 2698 D_ASSOC("Associated as %d to: %pM\n", il->vif->cfg.aid, 2699 il->active.bssid_addr); 2700 2701 if (test_bit(S_EXIT_PENDING, &il->status)) 2702 return; 2703 2704 il_scan_cancel_timeout(il, 200); 2705 2706 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2707 il3945_commit_rxon(il); 2708 2709 rc = il_send_rxon_timing(il); 2710 if (rc) 2711 IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n"); 2712 2713 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2714 2715 il->staging.assoc_id = cpu_to_le16(il->vif->cfg.aid); 2716 2717 D_ASSOC("assoc id %d beacon interval %d\n", il->vif->cfg.aid, 2718 il->vif->bss_conf.beacon_int); 2719 2720 if (il->vif->bss_conf.use_short_preamble) 2721 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 2722 else 2723 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 2724 2725 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) { 2726 if (il->vif->bss_conf.use_short_slot) 2727 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 2728 else 2729 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2730 } 2731 2732 il3945_commit_rxon(il); 2733 2734 switch (il->vif->type) { 2735 case NL80211_IFTYPE_STATION: 2736 
il3945_rate_scale_init(il->hw, IL_AP_ID); 2737 break; 2738 case NL80211_IFTYPE_ADHOC: 2739 il3945_send_beacon_cmd(il); 2740 break; 2741 default: 2742 IL_ERR("%s Should not be called in %d mode\n", __func__, 2743 il->vif->type); 2744 break; 2745 } 2746 } 2747 2748 /***************************************************************************** 2749 * 2750 * mac80211 entry point functions 2751 * 2752 *****************************************************************************/ 2753 2754 #define UCODE_READY_TIMEOUT (2 * HZ) 2755 2756 static int 2757 il3945_mac_start(struct ieee80211_hw *hw) 2758 { 2759 struct il_priv *il = hw->priv; 2760 int ret; 2761 2762 /* we should be verifying the device is ready to be opened */ 2763 mutex_lock(&il->mutex); 2764 D_MAC80211("enter\n"); 2765 2766 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 2767 * ucode filename and max sizes are card-specific. */ 2768 2769 if (!il->ucode_code.len) { 2770 ret = il3945_read_ucode(il); 2771 if (ret) { 2772 IL_ERR("Could not read microcode: %d\n", ret); 2773 mutex_unlock(&il->mutex); 2774 goto out_release_irq; 2775 } 2776 } 2777 2778 ret = __il3945_up(il); 2779 2780 mutex_unlock(&il->mutex); 2781 2782 if (ret) 2783 goto out_release_irq; 2784 2785 D_INFO("Start UP work.\n"); 2786 2787 /* Wait for START_ALIVE from ucode. Otherwise callbacks from 2788 * mac80211 will not be run successfully. */ 2789 ret = wait_event_timeout(il->wait_command_queue, 2790 test_bit(S_READY, &il->status), 2791 UCODE_READY_TIMEOUT); 2792 if (!ret) { 2793 if (!test_bit(S_READY, &il->status)) { 2794 IL_ERR("Wait for START_ALIVE timeout after %dms.\n", 2795 jiffies_to_msecs(UCODE_READY_TIMEOUT)); 2796 ret = -ETIMEDOUT; 2797 goto out_release_irq; 2798 } 2799 } 2800 2801 /* ucode is running and will send rfkill notifications, 2802 * no need to poll the killswitch state anymore */ 2803 cancel_delayed_work(&il->_3945.rfkill_poll); 2804 2805 il->is_open = 1; 2806 D_MAC80211("leave\n"); 2807 return 0; 2808 2809 out_release_irq: 2810 il->is_open = 0; 2811 D_MAC80211("leave - failed\n"); 2812 return ret; 2813 } 2814 2815 static void 2816 il3945_mac_stop(struct ieee80211_hw *hw) 2817 { 2818 struct il_priv *il = hw->priv; 2819 2820 D_MAC80211("enter\n"); 2821 2822 if (!il->is_open) { 2823 D_MAC80211("leave - skip\n"); 2824 return; 2825 } 2826 2827 il->is_open = 0; 2828 2829 il3945_down(il); 2830 2831 flush_workqueue(il->workqueue); 2832 2833 /* start polling the killswitch state again */ 2834 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2835 round_jiffies_relative(2 * HZ)); 2836 2837 D_MAC80211("leave\n"); 2838 } 2839 2840 static void 2841 il3945_mac_tx(struct ieee80211_hw *hw, 2842 struct ieee80211_tx_control *control, 2843 struct sk_buff *skb) 2844 { 2845 struct il_priv *il = hw->priv; 2846 2847 D_MAC80211("enter\n"); 2848 2849 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2850 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2851 2852 if (il3945_tx_skb(il, control->sta, skb)) 2853 dev_kfree_skb_any(skb); 2854 2855 D_MAC80211("leave\n"); 2856 } 2857 2858 void 2859 il3945_config_ap(struct il_priv *il) 2860 { 2861 struct ieee80211_vif *vif = il->vif; 2862 int rc = 0; 2863 2864 if (test_bit(S_EXIT_PENDING, &il->status)) 2865 return; 2866 2867 /* The following should be done only at AP bring up */ 2868 if (!(il_is_associated(il))) { 2869 2870 /* RXON - unassoc (to set timing command) */ 2871 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2872 il3945_commit_rxon(il); 2873 2874 /* RXON Timing */ 2875 rc = 
il_send_rxon_timing(il); 2876 if (rc) 2877 IL_WARN("C_RXON_TIMING failed - " 2878 "Attempting to continue.\n"); 2879 2880 il->staging.assoc_id = 0; 2881 2882 if (vif->bss_conf.use_short_preamble) 2883 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 2884 else 2885 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 2886 2887 if (il->staging.flags & RXON_FLG_BAND_24G_MSK) { 2888 if (vif->bss_conf.use_short_slot) 2889 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 2890 else 2891 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2892 } 2893 /* restore RXON assoc */ 2894 il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 2895 il3945_commit_rxon(il); 2896 } 2897 il3945_send_beacon_cmd(il); 2898 } 2899 2900 static int 2901 il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 2902 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 2903 struct ieee80211_key_conf *key) 2904 { 2905 struct il_priv *il = hw->priv; 2906 int ret = 0; 2907 u8 sta_id = IL_INVALID_STATION; 2908 u8 static_key; 2909 2910 D_MAC80211("enter\n"); 2911 2912 if (il3945_mod_params.sw_crypto) { 2913 D_MAC80211("leave - hwcrypto disabled\n"); 2914 return -EOPNOTSUPP; 2915 } 2916 2917 /* 2918 * To support IBSS RSN, don't program group keys in IBSS, the 2919 * hardware will then not attempt to decrypt the frames. 2920 */ 2921 if (vif->type == NL80211_IFTYPE_ADHOC && 2922 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 2923 D_MAC80211("leave - IBSS RSN\n"); 2924 return -EOPNOTSUPP; 2925 } 2926 2927 static_key = !il_is_associated(il); 2928 2929 if (!static_key) { 2930 sta_id = il_sta_id_or_broadcast(il, sta); 2931 if (sta_id == IL_INVALID_STATION) { 2932 D_MAC80211("leave - station not found\n"); 2933 return -EINVAL; 2934 } 2935 } 2936 2937 mutex_lock(&il->mutex); 2938 il_scan_cancel_timeout(il, 100); 2939 2940 switch (cmd) { 2941 case SET_KEY: 2942 if (static_key) 2943 ret = il3945_set_static_key(il, key); 2944 else 2945 ret = il3945_set_dynamic_key(il, key, sta_id); 2946 D_MAC80211("enable hwcrypto key\n"); 2947 break; 2948 case DISABLE_KEY: 2949 if (static_key) 2950 ret = il3945_remove_static_key(il); 2951 else 2952 ret = il3945_clear_sta_key_info(il, sta_id); 2953 D_MAC80211("disable hwcrypto key\n"); 2954 break; 2955 default: 2956 ret = -EINVAL; 2957 } 2958 2959 D_MAC80211("leave ret %d\n", ret); 2960 mutex_unlock(&il->mutex); 2961 2962 return ret; 2963 } 2964 2965 static int 2966 il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2967 struct ieee80211_sta *sta) 2968 { 2969 struct il_priv *il = hw->priv; 2970 struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv; 2971 int ret; 2972 bool is_ap = vif->type == NL80211_IFTYPE_STATION; 2973 u8 sta_id; 2974 2975 mutex_lock(&il->mutex); 2976 D_INFO("station %pM\n", sta->addr); 2977 sta_priv->common.sta_id = IL_INVALID_STATION; 2978 2979 ret = il_add_station_common(il, sta->addr, is_ap, sta, &sta_id); 2980 if (ret) { 2981 IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret); 2982 /* Should we return success if return code is EEXIST ? 
*/ 2983 mutex_unlock(&il->mutex); 2984 return ret; 2985 } 2986 2987 sta_priv->common.sta_id = sta_id; 2988 2989 /* Initialize rate scaling */ 2990 D_INFO("Initializing rate scaling for station %pM\n", sta->addr); 2991 il3945_rs_rate_init(il, sta, sta_id); 2992 mutex_unlock(&il->mutex); 2993 2994 return 0; 2995 } 2996 2997 static void 2998 il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, 2999 unsigned int *total_flags, u64 multicast) 3000 { 3001 struct il_priv *il = hw->priv; 3002 __le32 filter_or = 0, filter_nand = 0; 3003 3004 #define CHK(test, flag) do { \ 3005 if (*total_flags & (test)) \ 3006 filter_or |= (flag); \ 3007 else \ 3008 filter_nand |= (flag); \ 3009 } while (0) 3010 3011 D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags, 3012 *total_flags); 3013 3014 CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK); 3015 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); 3016 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); 3017 3018 #undef CHK 3019 3020 mutex_lock(&il->mutex); 3021 3022 il->staging.filter_flags &= ~filter_nand; 3023 il->staging.filter_flags |= filter_or; 3024 3025 /* 3026 * Not committing directly because hardware can perform a scan, 3027 * but even if hw is ready, committing here breaks for some reason, 3028 * we'll eventually commit the filter flags change anyway. 3029 */ 3030 3031 mutex_unlock(&il->mutex); 3032 3033 /* 3034 * Receiving all multicast frames is always enabled by the 3035 * default flags setup in il_connection_init_rx_config() 3036 * since we currently do not support programming multicast 3037 * filters into the device. 3038 */ 3039 *total_flags &= 3040 FIF_OTHER_BSS | FIF_ALLMULTI | 3041 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 3042 } 3043 3044 /***************************************************************************** 3045 * 3046 * sysfs attributes 3047 * 3048 *****************************************************************************/ 3049 3050 #ifdef CONFIG_IWLEGACY_DEBUG 3051 3052 /* 3053 * The following adds a new attribute to the sysfs representation 3054 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) 3055 * used for controlling the debug level. 3056 * 3057 * See the level definitions in iwl for details. 3058 * 3059 * The debug_level being managed using sysfs below is a per device debug 3060 * level that is used instead of the global debug level if it (the per 3061 * device debug level) is set. 
3062 */ 3063 static ssize_t 3064 il3945_show_debug_level(struct device *d, struct device_attribute *attr, 3065 char *buf) 3066 { 3067 struct il_priv *il = dev_get_drvdata(d); 3068 return sprintf(buf, "0x%08X\n", il_get_debug_level(il)); 3069 } 3070 3071 static ssize_t 3072 il3945_store_debug_level(struct device *d, struct device_attribute *attr, 3073 const char *buf, size_t count) 3074 { 3075 struct il_priv *il = dev_get_drvdata(d); 3076 unsigned long val; 3077 int ret; 3078 3079 ret = kstrtoul(buf, 0, &val); 3080 if (ret) 3081 IL_INFO("%s is not in hex or decimal form.\n", buf); 3082 else 3083 il->debug_level = val; 3084 3085 return strnlen(buf, count); 3086 } 3087 3088 static DEVICE_ATTR(debug_level, 0644, il3945_show_debug_level, 3089 il3945_store_debug_level); 3090 3091 #endif /* CONFIG_IWLEGACY_DEBUG */ 3092 3093 static ssize_t 3094 il3945_show_temperature(struct device *d, struct device_attribute *attr, 3095 char *buf) 3096 { 3097 struct il_priv *il = dev_get_drvdata(d); 3098 3099 if (!il_is_alive(il)) 3100 return -EAGAIN; 3101 3102 return sprintf(buf, "%d\n", il3945_hw_get_temperature(il)); 3103 } 3104 3105 static DEVICE_ATTR(temperature, 0444, il3945_show_temperature, NULL); 3106 3107 static ssize_t 3108 il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) 3109 { 3110 struct il_priv *il = dev_get_drvdata(d); 3111 return sprintf(buf, "%d\n", il->tx_power_user_lmt); 3112 } 3113 3114 static ssize_t 3115 il3945_store_tx_power(struct device *d, struct device_attribute *attr, 3116 const char *buf, size_t count) 3117 { 3118 struct il_priv *il = dev_get_drvdata(d); 3119 char *p = (char *)buf; 3120 u32 val; 3121 3122 val = simple_strtoul(p, &p, 10); 3123 if (p == buf) 3124 IL_INFO(": %s is not in decimal form.\n", buf); 3125 else 3126 il3945_hw_reg_set_txpower(il, val); 3127 3128 return count; 3129 } 3130 3131 static DEVICE_ATTR(tx_power, 0644, il3945_show_tx_power, il3945_store_tx_power); 3132 3133 static ssize_t 3134 il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf) 3135 { 3136 struct il_priv *il = dev_get_drvdata(d); 3137 3138 return sprintf(buf, "0x%04X\n", il->active.flags); 3139 } 3140 3141 static ssize_t 3142 il3945_store_flags(struct device *d, struct device_attribute *attr, 3143 const char *buf, size_t count) 3144 { 3145 struct il_priv *il = dev_get_drvdata(d); 3146 u32 flags = simple_strtoul(buf, NULL, 0); 3147 3148 mutex_lock(&il->mutex); 3149 if (le32_to_cpu(il->staging.flags) != flags) { 3150 /* Cancel any currently running scans... 
*/ 3151 if (il_scan_cancel_timeout(il, 100)) 3152 IL_WARN("Could not cancel scan.\n"); 3153 else { 3154 D_INFO("Committing rxon.flags = 0x%04X\n", flags); 3155 il->staging.flags = cpu_to_le32(flags); 3156 il3945_commit_rxon(il); 3157 } 3158 } 3159 mutex_unlock(&il->mutex); 3160 3161 return count; 3162 } 3163 3164 static DEVICE_ATTR(flags, 0644, il3945_show_flags, il3945_store_flags); 3165 3166 static ssize_t 3167 il3945_show_filter_flags(struct device *d, struct device_attribute *attr, 3168 char *buf) 3169 { 3170 struct il_priv *il = dev_get_drvdata(d); 3171 3172 return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags)); 3173 } 3174 3175 static ssize_t 3176 il3945_store_filter_flags(struct device *d, struct device_attribute *attr, 3177 const char *buf, size_t count) 3178 { 3179 struct il_priv *il = dev_get_drvdata(d); 3180 u32 filter_flags = simple_strtoul(buf, NULL, 0); 3181 3182 mutex_lock(&il->mutex); 3183 if (le32_to_cpu(il->staging.filter_flags) != filter_flags) { 3184 /* Cancel any currently running scans... */ 3185 if (il_scan_cancel_timeout(il, 100)) 3186 IL_WARN("Could not cancel scan.\n"); 3187 else { 3188 D_INFO("Committing rxon.filter_flags = " "0x%04X\n", 3189 filter_flags); 3190 il->staging.filter_flags = cpu_to_le32(filter_flags); 3191 il3945_commit_rxon(il); 3192 } 3193 } 3194 mutex_unlock(&il->mutex); 3195 3196 return count; 3197 } 3198 3199 static DEVICE_ATTR(filter_flags, 0644, il3945_show_filter_flags, 3200 il3945_store_filter_flags); 3201 3202 static ssize_t 3203 il3945_show_measurement(struct device *d, struct device_attribute *attr, 3204 char *buf) 3205 { 3206 struct il_priv *il = dev_get_drvdata(d); 3207 struct il_spectrum_notification measure_report; 3208 u32 size = sizeof(measure_report), len = 0, ofs = 0; 3209 u8 *data = (u8 *) &measure_report; 3210 unsigned long flags; 3211 3212 spin_lock_irqsave(&il->lock, flags); 3213 if (!(il->measurement_status & MEASUREMENT_READY)) { 3214 spin_unlock_irqrestore(&il->lock, flags); 3215 return 0; 3216 } 3217 memcpy(&measure_report, &il->measure_report, size); 3218 il->measurement_status = 0; 3219 spin_unlock_irqrestore(&il->lock, flags); 3220 3221 while (size && PAGE_SIZE - len) { 3222 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, 3223 PAGE_SIZE - len, true); 3224 len = strlen(buf); 3225 if (PAGE_SIZE - len) 3226 buf[len++] = '\n'; 3227 3228 ofs += 16; 3229 size -= min(size, 16U); 3230 } 3231 3232 return len; 3233 } 3234 3235 static ssize_t 3236 il3945_store_measurement(struct device *d, struct device_attribute *attr, 3237 const char *buf, size_t count) 3238 { 3239 struct il_priv *il = dev_get_drvdata(d); 3240 struct ieee80211_measurement_params params = { 3241 .channel = le16_to_cpu(il->active.channel), 3242 .start_time = cpu_to_le64(il->_3945.last_tsf), 3243 .duration = cpu_to_le16(1), 3244 }; 3245 u8 type = IL_MEASURE_BASIC; 3246 u8 buffer[32]; 3247 u8 channel; 3248 3249 if (count) { 3250 char *p = buffer; 3251 strscpy(buffer, buf, sizeof(buffer)); 3252 channel = simple_strtoul(p, NULL, 0); 3253 if (channel) 3254 params.channel = channel; 3255 3256 p = buffer; 3257 while (*p && *p != ' ') 3258 p++; 3259 if (*p) 3260 type = simple_strtoul(p + 1, NULL, 0); 3261 } 3262 3263 D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n", 3264 type, params.channel, buf); 3265 il3945_get_measurement(il, &params, type); 3266 3267 return count; 3268 } 3269 3270 static DEVICE_ATTR(measurement, 0600, il3945_show_measurement, 3271 il3945_store_measurement); 3272 3273 static ssize_t 3274
il3945_store_retry_rate(struct device *d, struct device_attribute *attr, 3275 const char *buf, size_t count) 3276 { 3277 struct il_priv *il = dev_get_drvdata(d); 3278 3279 il->retry_rate = simple_strtoul(buf, NULL, 0); 3280 if (il->retry_rate <= 0) 3281 il->retry_rate = 1; 3282 3283 return count; 3284 } 3285 3286 static ssize_t 3287 il3945_show_retry_rate(struct device *d, struct device_attribute *attr, 3288 char *buf) 3289 { 3290 struct il_priv *il = dev_get_drvdata(d); 3291 return sprintf(buf, "%d", il->retry_rate); 3292 } 3293 3294 static DEVICE_ATTR(retry_rate, 0600, il3945_show_retry_rate, 3295 il3945_store_retry_rate); 3296 3297 static ssize_t 3298 il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf) 3299 { 3300 /* all this shit doesn't belong into sysfs anyway */ 3301 return 0; 3302 } 3303 3304 static DEVICE_ATTR(channels, 0400, il3945_show_channels, NULL); 3305 3306 static ssize_t 3307 il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf) 3308 { 3309 struct il_priv *il = dev_get_drvdata(d); 3310 3311 if (!il_is_alive(il)) 3312 return -EAGAIN; 3313 3314 return sprintf(buf, "%d\n", il3945_mod_params.antenna); 3315 } 3316 3317 static ssize_t 3318 il3945_store_antenna(struct device *d, struct device_attribute *attr, 3319 const char *buf, size_t count) 3320 { 3321 struct il_priv *il __maybe_unused = dev_get_drvdata(d); 3322 int ant; 3323 3324 if (count == 0) 3325 return 0; 3326 3327 if (sscanf(buf, "%1i", &ant) != 1) { 3328 D_INFO("not in hex or decimal form.\n"); 3329 return count; 3330 } 3331 3332 if (ant >= 0 && ant <= 2) { 3333 D_INFO("Setting antenna select to %d.\n", ant); 3334 il3945_mod_params.antenna = (enum il3945_antenna)ant; 3335 } else 3336 D_INFO("Bad antenna select value %d.\n", ant); 3337 3338 return count; 3339 } 3340 3341 static DEVICE_ATTR(antenna, 0644, il3945_show_antenna, il3945_store_antenna); 3342 3343 static ssize_t 3344 il3945_show_status(struct device *d, struct device_attribute *attr, char *buf) 3345 { 3346 struct il_priv *il = dev_get_drvdata(d); 3347 if (!il_is_alive(il)) 3348 return -EAGAIN; 3349 return sprintf(buf, "0x%08x\n", (int)il->status); 3350 } 3351 3352 static DEVICE_ATTR(status, 0444, il3945_show_status, NULL); 3353 3354 static ssize_t 3355 il3945_dump_error_log(struct device *d, struct device_attribute *attr, 3356 const char *buf, size_t count) 3357 { 3358 struct il_priv *il = dev_get_drvdata(d); 3359 char *p = (char *)buf; 3360 3361 if (p[0] == '1') 3362 il3945_dump_nic_error_log(il); 3363 3364 return strnlen(buf, count); 3365 } 3366 3367 static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log); 3368 3369 /***************************************************************************** 3370 * 3371 * driver setup and tear down 3372 * 3373 *****************************************************************************/ 3374 3375 static int 3376 il3945_setup_deferred_work(struct il_priv *il) 3377 { 3378 il->workqueue = create_singlethread_workqueue(DRV_NAME); 3379 if (!il->workqueue) 3380 return -ENOMEM; 3381 3382 init_waitqueue_head(&il->wait_command_queue); 3383 3384 INIT_WORK(&il->restart, il3945_bg_restart); 3385 INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish); 3386 INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start); 3387 INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start); 3388 INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll); 3389 3390 il_setup_scan_deferred_work(il); 3391 3392 il3945_hw_setup_deferred_work(il); 3393 3394 
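/* Descriptive note on the two handlers initialized next: the watchdog timer (il_bg_watchdog) polls for stalled TX queues once armed by il_setup_watchdog after the ALIVE response, and the tasklet runs il3945_irq_tasklet as the interrupt bottom half outside hard-IRQ context. */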
timer_setup(&il->watchdog, il_bg_watchdog, 0); 3395 3396 tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet); 3397 3398 return 0; 3399 } 3400 3401 static void 3402 il3945_cancel_deferred_work(struct il_priv *il) 3403 { 3404 il3945_hw_cancel_deferred_work(il); 3405 3406 cancel_delayed_work_sync(&il->init_alive_start); 3407 cancel_delayed_work(&il->alive_start); 3408 3409 il_cancel_scan_deferred_work(il); 3410 } 3411 3412 static struct attribute *il3945_sysfs_entries[] = { 3413 &dev_attr_antenna.attr, 3414 &dev_attr_channels.attr, 3415 &dev_attr_dump_errors.attr, 3416 &dev_attr_flags.attr, 3417 &dev_attr_filter_flags.attr, 3418 &dev_attr_measurement.attr, 3419 &dev_attr_retry_rate.attr, 3420 &dev_attr_status.attr, 3421 &dev_attr_temperature.attr, 3422 &dev_attr_tx_power.attr, 3423 #ifdef CONFIG_IWLEGACY_DEBUG 3424 &dev_attr_debug_level.attr, 3425 #endif 3426 NULL 3427 }; 3428 3429 static const struct attribute_group il3945_attribute_group = { 3430 .name = NULL, /* put in device directory */ 3431 .attrs = il3945_sysfs_entries, 3432 }; 3433 3434 static struct ieee80211_ops il3945_mac_ops __ro_after_init = { 3435 .add_chanctx = ieee80211_emulate_add_chanctx, 3436 .remove_chanctx = ieee80211_emulate_remove_chanctx, 3437 .change_chanctx = ieee80211_emulate_change_chanctx, 3438 .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, 3439 .tx = il3945_mac_tx, 3440 .wake_tx_queue = ieee80211_handle_wake_tx_queue, 3441 .start = il3945_mac_start, 3442 .stop = il3945_mac_stop, 3443 .add_interface = il_mac_add_interface, 3444 .remove_interface = il_mac_remove_interface, 3445 .change_interface = il_mac_change_interface, 3446 .config = il_mac_config, 3447 .configure_filter = il3945_configure_filter, 3448 .set_key = il3945_mac_set_key, 3449 .conf_tx = il_mac_conf_tx, 3450 .reset_tsf = il_mac_reset_tsf, 3451 .bss_info_changed = il_mac_bss_info_changed, 3452 .hw_scan = il_mac_hw_scan, 3453 .sta_add = il3945_mac_sta_add, 3454 .sta_remove = il_mac_sta_remove, 3455 .tx_last_beacon = il_mac_tx_last_beacon, 3456 .flush = il_mac_flush, 3457 }; 3458 3459 static int 3460 il3945_init_drv(struct il_priv *il) 3461 { 3462 int ret; 3463 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; 3464 3465 il->retry_rate = 1; 3466 il->beacon_skb = NULL; 3467 3468 spin_lock_init(&il->sta_lock); 3469 spin_lock_init(&il->hcmd_lock); 3470 3471 INIT_LIST_HEAD(&il->free_frames); 3472 3473 mutex_init(&il->mutex); 3474 3475 il->ieee_channels = NULL; 3476 il->ieee_rates = NULL; 3477 il->band = NL80211_BAND_2GHZ; 3478 3479 il->iw_mode = NL80211_IFTYPE_STATION; 3480 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; 3481 3482 /* initialize force reset */ 3483 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD; 3484 3485 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3486 IL_WARN("Unsupported EEPROM version: 0x%04X\n", 3487 eeprom->version); 3488 ret = -EINVAL; 3489 goto err; 3490 } 3491 ret = il_init_channel_map(il); 3492 if (ret) { 3493 IL_ERR("initializing regulatory failed: %d\n", ret); 3494 goto err; 3495 } 3496 3497 /* Set up txpower settings in driver for all channels */ 3498 if (il3945_txpower_set_from_eeprom(il)) { 3499 ret = -EIO; 3500 goto err_free_channel_map; 3501 } 3502 3503 ret = il_init_geos(il); 3504 if (ret) { 3505 IL_ERR("initializing geos failed: %d\n", ret); 3506 goto err_free_channel_map; 3507 } 3508 il3945_init_hw_rates(il, il->ieee_rates); 3509 3510 return 0; 3511 3512 err_free_channel_map: 3513 il_free_channel_map(il); 3514 err: 3515 return ret; 3516 } 3517 3518 #define 
IL3945_MAX_PROBE_REQUEST 200 3519 3520 static int 3521 il3945_setup_mac(struct il_priv *il) 3522 { 3523 int ret; 3524 struct ieee80211_hw *hw = il->hw; 3525 3526 hw->rate_control_algorithm = "iwl-3945-rs"; 3527 hw->sta_data_size = sizeof(struct il3945_sta_priv); 3528 hw->vif_data_size = sizeof(struct il_vif_priv); 3529 3530 /* Tell mac80211 our characteristics */ 3531 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 3532 ieee80211_hw_set(hw, SUPPORTS_PS); 3533 ieee80211_hw_set(hw, SIGNAL_DBM); 3534 ieee80211_hw_set(hw, SPECTRUM_MGMT); 3535 3536 hw->wiphy->interface_modes = 3537 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); 3538 3539 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 3540 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 3541 REGULATORY_DISABLE_BEACON_HINTS; 3542 3543 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 3544 3545 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3546 /* we create the 802.11 header and a zero-length SSID element */ 3547 hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2; 3548 3549 /* Default value; 4 EDCA QOS priorities */ 3550 hw->queues = 4; 3551 3552 if (il->bands[NL80211_BAND_2GHZ].n_channels) 3553 il->hw->wiphy->bands[NL80211_BAND_2GHZ] = 3554 &il->bands[NL80211_BAND_2GHZ]; 3555 3556 if (il->bands[NL80211_BAND_5GHZ].n_channels) 3557 il->hw->wiphy->bands[NL80211_BAND_5GHZ] = 3558 &il->bands[NL80211_BAND_5GHZ]; 3559 3560 il_leds_init(il); 3561 3562 wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); 3563 3564 ret = ieee80211_register_hw(il->hw); 3565 if (ret) { 3566 IL_ERR("Failed to register hw (error %d)\n", ret); 3567 return ret; 3568 } 3569 il->mac80211_registered = 1; 3570 3571 return 0; 3572 } 3573 3574 static int 3575 il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3576 { 3577 int err = 0; 3578 struct il_priv *il; 3579 struct ieee80211_hw *hw; 3580 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); 3581 struct il3945_eeprom *eeprom; 3582 unsigned long flags; 3583 3584 /*********************** 3585 * 1. Allocating HW data 3586 * ********************/ 3587 3588 hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il3945_mac_ops); 3589 if (!hw) { 3590 err = -ENOMEM; 3591 goto out; 3592 } 3593 il = hw->priv; 3594 il->hw = hw; 3595 SET_IEEE80211_DEV(hw, &pdev->dev); 3596 3597 il->cmd_queue = IL39_CMD_QUEUE_NUM; 3598 3599 D_INFO("*** LOAD DRIVER ***\n"); 3600 il->cfg = cfg; 3601 il->ops = &il3945_ops; 3602 #ifdef CONFIG_IWLEGACY_DEBUGFS 3603 il->debugfs_ops = &il3945_debugfs_ops; 3604 #endif 3605 il->pci_dev = pdev; 3606 il->inta_mask = CSR_INI_SET_MASK; 3607 3608 /*************************** 3609 * 2. Initializing PCI bus 3610 * *************************/ 3611 pci_disable_link_state(pdev, 3612 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | 3613 PCIE_LINK_STATE_CLKPM); 3614 3615 if (pci_enable_device(pdev)) { 3616 err = -ENODEV; 3617 goto out_ieee80211_free_hw; 3618 } 3619 3620 pci_set_master(pdev); 3621 3622 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3623 if (err) { 3624 IL_WARN("No suitable DMA available.\n"); 3625 goto out_pci_disable_device; 3626 } 3627 3628 pci_set_drvdata(pdev, il); 3629 err = pci_request_regions(pdev, DRV_NAME); 3630 if (err) 3631 goto out_pci_disable_device; 3632 3633 /*********************** 3634 * 3. 
Read REV Register 3635 * ********************/ 3636 il->hw_base = pci_ioremap_bar(pdev, 0); 3637 if (!il->hw_base) { 3638 err = -ENODEV; 3639 goto out_pci_release_regions; 3640 } 3641 3642 D_INFO("pci_resource_len = 0x%08llx\n", 3643 (unsigned long long)pci_resource_len(pdev, 0)); 3644 D_INFO("pci_resource_base = %p\n", il->hw_base); 3645 3646 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3647 * PCI Tx retries from interfering with C3 CPU state */ 3648 pci_write_config_byte(pdev, 0x41, 0x00); 3649 3650 /* these spin locks will be used in apm_init and EEPROM access 3651 * we should init now 3652 */ 3653 spin_lock_init(&il->reg_lock); 3654 spin_lock_init(&il->lock); 3655 3656 /* 3657 * stop and reset the on-board processor just in case it is in a 3658 * strange state ... like being left stranded by a primary kernel 3659 * and this is now the kdump kernel trying to start up 3660 */ 3661 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3662 3663 /*********************** 3664 * 4. Read EEPROM 3665 * ********************/ 3666 3667 /* Read the EEPROM */ 3668 err = il_eeprom_init(il); 3669 if (err) { 3670 IL_ERR("Unable to init EEPROM\n"); 3671 goto out_iounmap; 3672 } 3673 /* MAC Address location in EEPROM same for 3945/4965 */ 3674 eeprom = (struct il3945_eeprom *)il->eeprom; 3675 D_INFO("MAC address: %pM\n", eeprom->mac_address); 3676 SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address); 3677 3678 /*********************** 3679 * 5. Setup HW Constants 3680 * ********************/ 3681 /* Device-specific setup */ 3682 err = il3945_hw_set_hw_params(il); 3683 if (err) { 3684 IL_ERR("failed to set hw settings\n"); 3685 goto out_eeprom_free; 3686 } 3687 3688 /*********************** 3689 * 6. Setup il 3690 * ********************/ 3691 3692 err = il3945_init_drv(il); 3693 if (err) { 3694 IL_ERR("initializing driver failed\n"); 3695 goto out_unset_hw_params; 3696 } 3697 3698 IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name); 3699 3700 /*********************** 3701 * 7. Setup Services 3702 * ********************/ 3703 3704 spin_lock_irqsave(&il->lock, flags); 3705 il_disable_interrupts(il); 3706 spin_unlock_irqrestore(&il->lock, flags); 3707 3708 pci_enable_msi(il->pci_dev); 3709 3710 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); 3711 if (err) { 3712 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); 3713 goto out_disable_msi; 3714 } 3715 3716 err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group); 3717 if (err) { 3718 IL_ERR("failed to create sysfs device attributes\n"); 3719 goto out_release_irq; 3720 } 3721 3722 il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]); 3723 err = il3945_setup_deferred_work(il); 3724 if (err) 3725 goto out_remove_sysfs; 3726 3727 il3945_setup_handlers(il); 3728 il_power_initialize(il); 3729 3730 /********************************* 3731 * 8. 
Setup and Register mac80211 3732 * *******************************/ 3733 3734 il_enable_interrupts(il); 3735 3736 err = il3945_setup_mac(il); 3737 if (err) 3738 goto out_destroy_workqueue; 3739 3740 il_dbgfs_register(il, DRV_NAME); 3741 3742 /* Start monitoring the killswitch */ 3743 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ); 3744 3745 return 0; 3746 3747 out_destroy_workqueue: 3748 destroy_workqueue(il->workqueue); 3749 il->workqueue = NULL; 3750 out_remove_sysfs: 3751 sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); 3752 out_release_irq: 3753 free_irq(il->pci_dev->irq, il); 3754 out_disable_msi: 3755 pci_disable_msi(il->pci_dev); 3756 il_free_geos(il); 3757 il_free_channel_map(il); 3758 out_unset_hw_params: 3759 il3945_unset_hw_params(il); 3760 out_eeprom_free: 3761 il_eeprom_free(il); 3762 out_iounmap: 3763 iounmap(il->hw_base); 3764 out_pci_release_regions: 3765 pci_release_regions(pdev); 3766 out_pci_disable_device: 3767 pci_disable_device(pdev); 3768 out_ieee80211_free_hw: 3769 ieee80211_free_hw(il->hw); 3770 out: 3771 return err; 3772 } 3773 3774 static void 3775 il3945_pci_remove(struct pci_dev *pdev) 3776 { 3777 struct il_priv *il = pci_get_drvdata(pdev); 3778 unsigned long flags; 3779 3780 if (!il) 3781 return; 3782 3783 D_INFO("*** UNLOAD DRIVER ***\n"); 3784 3785 il_dbgfs_unregister(il); 3786 3787 set_bit(S_EXIT_PENDING, &il->status); 3788 3789 il_leds_exit(il); 3790 3791 if (il->mac80211_registered) { 3792 ieee80211_unregister_hw(il->hw); 3793 il->mac80211_registered = 0; 3794 } else { 3795 il3945_down(il); 3796 } 3797 3798 /* 3799 * Make sure device is reset to low power before unloading driver. 3800 * This may be redundant with il_down(), but there are paths to 3801 * run il_down() without calling apm_ops.stop(), and there are 3802 * paths to avoid running il_down() at all before leaving driver. 3803 * This (inexpensive) call *makes sure* device is reset. 3804 */ 3805 il_apm_stop(il); 3806 3807 /* make sure we flush any pending irq or 3808 * tasklet for the driver 3809 */ 3810 spin_lock_irqsave(&il->lock, flags); 3811 il_disable_interrupts(il); 3812 spin_unlock_irqrestore(&il->lock, flags); 3813 3814 il3945_synchronize_irq(il); 3815 3816 sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); 3817 3818 cancel_delayed_work_sync(&il->_3945.rfkill_poll); 3819 3820 il3945_dealloc_ucode_pci(il); 3821 3822 if (il->rxq.bd) 3823 il3945_rx_queue_free(il, &il->rxq); 3824 il3945_hw_txq_ctx_free(il); 3825 3826 il3945_unset_hw_params(il); 3827 3828 /*netif_stop_queue(dev); */ 3829 3830 /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes 3831 * il->workqueue... so we can't take down the workqueue 3832 * until now... 
*/ 3833 destroy_workqueue(il->workqueue); 3834 il->workqueue = NULL; 3835 3836 free_irq(pdev->irq, il); 3837 pci_disable_msi(pdev); 3838 3839 iounmap(il->hw_base); 3840 pci_release_regions(pdev); 3841 pci_disable_device(pdev); 3842 3843 il_free_channel_map(il); 3844 il_free_geos(il); 3845 kfree(il->scan_cmd); 3846 dev_kfree_skb(il->beacon_skb); 3847 ieee80211_free_hw(il->hw); 3848 } 3849 3850 /***************************************************************************** 3851 * 3852 * driver and module entry point 3853 * 3854 *****************************************************************************/ 3855 3856 static struct pci_driver il3945_driver = { 3857 .name = DRV_NAME, 3858 .id_table = il3945_hw_card_ids, 3859 .probe = il3945_pci_probe, 3860 .remove = il3945_pci_remove, 3861 .driver.pm = IL_LEGACY_PM_OPS, 3862 }; 3863 3864 static int __init 3865 il3945_init(void) 3866 { 3867 3868 int ret; 3869 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); 3870 pr_info(DRV_COPYRIGHT "\n"); 3871 3872 /* 3873 * Disabling hardware scan means that mac80211 will perform scans 3874 * "the hard way", rather than using device's scan. 3875 */ 3876 if (il3945_mod_params.disable_hw_scan) { 3877 pr_info("hw_scan is disabled\n"); 3878 il3945_mac_ops.hw_scan = NULL; 3879 } 3880 3881 ret = il3945_rate_control_register(); 3882 if (ret) { 3883 pr_err("Unable to register rate control algorithm: %d\n", ret); 3884 return ret; 3885 } 3886 3887 ret = pci_register_driver(&il3945_driver); 3888 if (ret) { 3889 pr_err("Unable to initialize PCI module\n"); 3890 goto error_register; 3891 } 3892 3893 return ret; 3894 3895 error_register: 3896 il3945_rate_control_unregister(); 3897 return ret; 3898 } 3899 3900 static void __exit 3901 il3945_exit(void) 3902 { 3903 pci_unregister_driver(&il3945_driver); 3904 il3945_rate_control_unregister(); 3905 } 3906 3907 MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX)); 3908 3909 module_param_named(antenna, il3945_mod_params.antenna, int, 0444); 3910 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 3911 module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, 0444); 3912 MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])"); 3913 module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int, 3914 0444); 3915 MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); 3916 #ifdef CONFIG_IWLEGACY_DEBUG 3917 module_param_named(debug, il_debug_level, uint, 0644); 3918 MODULE_PARM_DESC(debug, "debug output mask"); 3919 #endif 3920 module_param_named(fw_restart, il3945_mod_params.restart_fw, int, 0444); 3921 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 3922 3923 module_exit(il3945_exit); 3924 module_init(il3945_init); 3925
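/* Illustrative usage (a sketch, not part of the original source), assuming the module is built as iwl3945 per DRV_NAME: the parameters declared above can be set at load time, e.g. "modprobe iwl3945 antenna=1 swcrypto=0 disable_hw_scan=0", which forces the MAIN antenna, enables hardware crypto (sw_crypto clear), and keeps mac80211 hardware scan support by leaving hw_scan in il3945_mac_ops. */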