1 /* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com> 2 * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com> 3 * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> 4 * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com> 5 * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de> 6 * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com> 7 * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com> 8 * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com> 9 * <http://rt2x00.serialmonkey.com> 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2 of the License, or 14 * (at your option) any later version. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, see <http://www.gnu.org/licenses/>. 23 */ 24 25 /* Module: rt2800mmio 26 * Abstract: rt2800 MMIO device routines. 
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"
#include "rt2800.h"
#include "rt2800lib.h"
#include "rt2800mmio.h"

/*
 * TX descriptor initialization
 */

/* Return the TXWI, which sits at the very start of the frame buffer. */
__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
{
	return (__le32 *) entry->skb->data;
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);

/*
 * Fill the four-word hardware TX descriptor for @entry from @txdesc.
 * The descriptor lives in coherent memory (entry_priv->desc); the frame
 * itself (TXWI + 802.11 frame) is DMA-mapped at skbdesc->skb_dma.
 */
void rt2800mmio_write_tx_desc(struct queue_entry *entry,
			      struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *txd = entry_priv->desc;
	u32 word;
	const unsigned int txwi_size = entry->queue->winfo_size;

	/*
	 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contain a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contain the TXWI and
	 * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
	 * data. It means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	/* Word 0: DMA address of the TXWI (segment 0). */
	word = 0;
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	/* Word 1: segment lengths and last-section flags. */
	word = 0;
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	/* Word 2: DMA address of the 802.11 frame (segment 1). */
	word = 0;
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + txwi_size);
	rt2x00_desc_write(txd, 2, word);

	/* Word 3: WIV (write IV back) and queue selection (2 = EDCA). */
	word = 0;
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);

/*
 * RX control handlers
 */

/*
 * Translate the hardware RX descriptor (word 3) of a completed entry
 * into the generic rxdone descriptor, then hand off to the common RXWI
 * parser for rate/RSSI/length information.
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
			    struct rxdone_entry_desc *rxdesc)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	u32 word;

	word = rt2x00_desc_read(rxd, 3);

	if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	/*
	 * Unfortunately we don't know the cipher type used during
	 * decryption. This prevents us from providing correct
	 * statistics through debugfs.
	 */
	rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

	if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		/*
		 * The hardware has already checked the Michael Mic and has
		 * stripped it from the frame. Signal this to mac80211.
		 */
		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		} else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
			/*
			 * In order to check the Michael Mic, the packet must
			 * have been decrypted. Mac80211 doesn't check the
			 * MMIC failure flag to initiate MMIC countermeasures
			 * if the decoded flag has not been set.
			 */
			rxdesc->flags |= RX_FLAG_DECRYPTED;

			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
		}
	}

	if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	if (rt2x00_get_field32(word, RXD_W3_L2PAD))
		rxdesc->dev_flags |= RXDONE_L2PAD;

	/*
	 * Process the RXWI structure that is at the start of the buffer.
	 */
	rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);

/*
 * Interrupt functions.
 */

/*
 * Re-run the power-save configuration after an auto-wakeup interrupt.
 * A zeroed ieee80211_conf means PS is treated as disabled by
 * rt2800_config().
 */
static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
{
	struct ieee80211_conf conf = { .flags = 0 };
	struct rt2x00lib_conf libconf = { .conf = &conf };

	rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}

static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
					       struct rt2x00_field32 irq_field)
{
	u32 reg;

	/*
	 * Enable a single interrupt. The interrupt mask register
	 * access needs locking.
	 */
	spin_lock_irq(&rt2x00dev->irqmask_lock);
	reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
	rt2x00_set_field32(&reg, irq_field, 1);
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irq(&rt2x00dev->irqmask_lock);
}

/*
 * Pre-TBTT tasklet: let rt2x00lib prepare the next beacon, then re-arm
 * the PRE_TBTT interrupt (which the irq handler masked before
 * scheduling us) if the radio is still enabled.
 */
void rt2800mmio_pretbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2x00lib_pretbtt(rt2x00dev);

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);

/*
 * TBTT tasklet: handle beacon-done work and apply the beacon-interval
 * drift compensation, then re-arm the TBTT interrupt.
 */
void rt2800mmio_tbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u32 reg;

	rt2x00lib_beacondone(rt2x00dev);

	if (rt2x00dev->intf_ap_count) {
		/*
		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
		 * causing beacon skew and as a result causing problems with
		 * some powersaving clients over time. Shorten the beacon
		 * interval every 64 beacons by 64us to mitigate this effect.
		 */
		if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
			/* One beacon before wrap: shrink the interval by
			 * 1 (i.e. 64us) for a single beacon period. */
			reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16) - 1);
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
			/* Restore the nominal beacon interval. */
			reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16));
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		}
		drv_data->tbtt_tick++;
		drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
	}

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);

/*
 * RX-done tasklet: drain received frames; reschedule ourselves if more
 * work remains, otherwise re-arm the RX_DONE interrupt.
 */
void rt2800mmio_rxdone_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	if (rt2x00mmio_rxdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
	else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);

/*
 * Auto-wakeup tasklet: refresh the PS configuration and re-arm the
 * AUTO_WAKEUP interrupt.
 */
void rt2800mmio_autowake_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	rt2800mmio_wakeup(rt2x00dev);

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev,
					    INT_MASK_CSR_AUTO_WAKEUP);
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);

/*
 * Process queued tx status reports; on a status timeout also complete
 * entries for which no status report will ever arrive.
 */
static void rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
{
	bool timeout = false;

	while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
	       (timeout = rt2800_txstatus_timeout(rt2x00dev))) {

		rt2800_txdone(rt2x00dev);

		if (timeout)
			rt2800_txdone_nostatus(rt2x00dev);
	}
}

/*
 * Drain TX_STA_FIFO into the kernel txstatus kfifo. Returns true if at
 * least one valid status report was fetched.
 */
static bool rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
	u32 status;
	bool more = false;

	/* FIXME: rewrite this comment
	 * The TX_FIFO_STATUS interrupt needs special care. We should
	 * read TX_STA_FIFO but we should do it immediately as otherwise
	 * the register can overflow and we would lose status reports.
	 *
	 * Hence, read the TX_STA_FIFO register and copy all tx status
	 * reports into a kernel FIFO which is handled in the txstatus
	 * tasklet. We use a tasklet to process the tx status reports
	 * because we can schedule the tasklet multiple times (when the
	 * interrupt fires again during tx status processing).
	 *
	 * The txstatus tasklet is called with INT_SOURCE_CSR_TX_FIFO_STATUS
	 * disabled so we have only one producer and one consumer - we don't
	 * need to lock the kfifo.
	 */
	while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
		status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
			break;

		kfifo_put(&rt2x00dev->txstatus_fifo, status);
		more = true;
	}

	return more;
}

/*
 * TX-status tasklet: alternate between completing queued reports and
 * fetching newly arrived ones until the hardware FIFO is empty, then
 * re-arm the TX_FIFO_STATUS interrupt. (INT_SOURCE_CSR and
 * INT_MASK_CSR share bit layout, so the SOURCE field is valid here.)
 */
void rt2800mmio_txstatus_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	do {
		rt2800mmio_txdone(rt2x00dev);

	} while (rt2800mmio_fetch_txstatus(rt2x00dev));

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev,
					    INT_SOURCE_CSR_TX_FIFO_STATUS);
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);

/*
 * Top-half interrupt handler: ACK all pending sources, defer the work
 * to the matching tasklets and mask the interrupts that were deferred
 * (the tasklets re-enable them when done).
 */
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
	struct rt2x00_dev *rt2x00dev = dev_instance;
	u32 reg, mask;

	/* Read status and ACK all interrupts */
	reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
	rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

	if (!reg)
		return IRQ_NONE;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return IRQ_HANDLED;

	/*
	 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
	 * for interrupts and interrupt masks we can just use the value of
	 * INT_SOURCE_CSR to create the interrupt mask.
	 */
	mask = ~reg;

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
		/* Fetch immediately: TX_STA_FIFO overflows easily. */
		rt2800mmio_fetch_txstatus(rt2x00dev);
		tasklet_schedule(&rt2x00dev->txstatus_tasklet);
	}

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
		tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
		tasklet_schedule(&rt2x00dev->autowake_tasklet);

	/*
	 * Disable all interrupts for which a tasklet was scheduled right now,
	 * the tasklet will reenable the appropriate interrupts.
	 */
	spin_lock(&rt2x00dev->irqmask_lock);
	reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR);
	reg &= mask;
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock(&rt2x00dev->irqmask_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);

/*
 * Switch all device interrupts on or off. For IRQ_OFF, also wait for
 * any running tasklet to complete before returning.
 */
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
			   enum dev_state state)
{
	u32 reg;
	unsigned long flags;

	/*
	 * When interrupts are being enabled, the interrupt registers
	 * should be cleared first to assure a clean state.
	 */
	if (state == STATE_RADIO_IRQ_ON) {
		reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR);
		rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
	}

	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
	reg = 0;
	if (state == STATE_RADIO_IRQ_ON) {
		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
	}
	/* For IRQ_OFF, reg stays 0: all sources masked. */
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

	if (state == STATE_RADIO_IRQ_OFF) {
		/*
		 * Wait for possibly running tasklets to finish.
		 */
		tasklet_kill(&rt2x00dev->txstatus_tasklet);
		tasklet_kill(&rt2x00dev->rxdone_tasklet);
		tasklet_kill(&rt2x00dev->autowake_tasklet);
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);

/*
 * Queue handlers.
 */

/* Enable RX, or start beaconing plus the PRE_TBTT timer, for @queue. */
void rt2800mmio_start_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);

/*
 * Hand queued TX frames to the hardware by advancing the per-queue
 * CTX index register to the current Q_INDEX entry.
 */
void rt2800mmio_kick_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry *entry;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		WARN_ON_ONCE(rt2x00queue_empty(queue));
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
					  entry->entry_idx);
		break;
	case QID_MGMT:
		/* Management frames go out over hardware queue 5. */
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
					  entry->entry_idx);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);

/*
 * Wait (bounded, up to 5 * 50ms) for @queue to drain. For TX queues,
 * run the tx-done path each iteration to catch status timeouts.
 * NOTE(review): @drop is not referenced in this implementation —
 * flushing is best-effort regardless of it; confirm against the
 * rt2x00 queue-flush contract.
 */
void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	bool tx_queue = false;
	unsigned int i;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		tx_queue = true;
		break;
	case QID_RX:
		break;
	default:
		return;
	}

	for (i = 0; i < 5; i++) {
		/*
		 * Check if the driver is already done, otherwise we
		 * have to sleep a little while to give the driver/hw
		 * the opportunity to complete interrupt process itself.
		 */
		if (rt2x00queue_empty(queue))
			break;

		/*
		 * For TX queues schedule completion tasklet to catch
		 * tx status timeouts, otherwise just wait.
		 */
		if (tx_queue) {
			tasklet_disable(&rt2x00dev->txstatus_tasklet);
			rt2800mmio_txdone(rt2x00dev);
			tasklet_enable(&rt2x00dev->txstatus_tasklet);
		}

		/*
		 * Wait for a little while to give the driver
		 * the opportunity to recover itself.
		 */
		msleep(50);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);

/* Disable RX, or stop beaconing and its timers, for @queue. */
void rt2800mmio_stop_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

		/*
		 * Wait for current invocation to finish. The tasklet
		 * won't be scheduled anymore afterwards since we disabled
		 * the TBTT and PRE TBTT timer.
		 */
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);

		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);

/*
 * Fill in the static per-queue parameters (entry count, buffer and
 * descriptor sizes) during queue allocation.
 */
void rt2800mmio_queue_init(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	unsigned short txwi_size, rxwi_size;

	rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);

	switch (queue->qid) {
	case QID_RX:
		queue->limit = 128;
		queue->data_size = AGGREGATION_SIZE;
		queue->desc_size = RXD_DESC_SIZE;
		queue->winfo_size = rxwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		queue->limit = 64;
		queue->data_size = AGGREGATION_SIZE;
		queue->desc_size = TXD_DESC_SIZE;
		queue->winfo_size = txwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_BEACON:
		queue->limit = 8;
		queue->data_size = 0; /* No DMA required for beacons */
		queue->desc_size = TXD_DESC_SIZE;
		queue->winfo_size = txwi_size;
		queue->priv_size = sizeof(struct queue_entry_priv_mmio);
		break;

	case QID_ATIM:
		/* fallthrough */
	default:
		BUG();
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);

/*
 * Initialization functions.
 */

/*
 * Return true while the entry is still owned by the hardware, i.e.
 * its DMA_DONE bit (descriptor word 1 for both RX and TX) is clear.
 */
bool rt2800mmio_get_entry_state(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		word = rt2x00_desc_read(entry_priv->desc, 1);

		return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
	} else {
		word = rt2x00_desc_read(entry_priv->desc, 1);

		return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);

/*
 * Reset an entry's descriptor for reuse: RX entries are re-armed with a
 * fresh buffer address and handed back to the hardware via RX_CRX_IDX;
 * TX entries are simply marked done.
 */
void rt2800mmio_clear_entry(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		word = rt2x00_desc_read(entry_priv->desc, 0);
		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
		rt2x00_desc_write(entry_priv->desc, 0, word);

		word = rt2x00_desc_read(entry_priv->desc, 1);
		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
		rt2x00_desc_write(entry_priv->desc, 1, word);

		/*
		 * Set RX IDX in register to inform hardware that we have
		 * handled this entry and it is available for reuse again.
		 */
		rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
					  entry->entry_idx);
	} else {
		word = rt2x00_desc_read(entry_priv->desc, 1);
		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
		rt2x00_desc_write(entry_priv->desc, 1, word);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);

/*
 * Program the DMA ring registers: base address, size and initial
 * indexes for the four data TX rings (queues 4 and 5 are unused and
 * zeroed) and the RX ring. Returns 0 (no failure path).
 */
int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_mmio *entry_priv;

	/*
	 * Initialize registers.
	 */
	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
				  rt2x00dev->tx[0].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);

	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
				  rt2x00dev->tx[1].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);

	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
				  rt2x00dev->tx[2].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);

	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
				  rt2x00dev->tx[3].limit);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);

	/* Hardware queues 4 and 5 are not backed by driver queues. */
	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);

	rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
	rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);

	entry_priv = rt2x00dev->rx->entries[0].priv_data;
	rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
				  entry_priv->desc_dma);
	rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
				  rt2x00dev->rx[0].limit);
	/* CRX starts at the last entry: the whole ring belongs to hw. */
	rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
				  rt2x00dev->rx[0].limit - 1);
	rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);

	rt2800_disable_wpdma(rt2x00dev);

	rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);

/*
 * One-time MMIO register initialization: reset all DMA indexes, reset
 * the packet buffer, apply PCIe clock workarounds for affected chips
 * and reset the MAC/BBP. Returns 0 (no failure path).
 */
int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/*
	 * Reset DMA indexes
	 */
	reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	/* Pulse the packet-buffer reset bits, then release them. */
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	if (rt2x00_is_pcie(rt2x00dev) &&
	    (rt2x00_rt(rt2x00dev, RT3090) ||
	     rt2x00_rt(rt2x00dev, RT3390) ||
	     rt2x00_rt(rt2x00dev, RT3572) ||
	     rt2x00_rt(rt2x00dev, RT3593) ||
	     rt2x00_rt(rt2x00dev, RT5390) ||
	     rt2x00_rt(rt2x00dev, RT5392) ||
	     rt2x00_rt(rt2x00dev, RT5592))) {
		/* Force the PCIe clock on for these chip revisions. */
		reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL);
		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
		rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
	}

	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/* Assert, then deassert, the MAC and BBP reset bits. */
	reg = 0;
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);

/*
 * Device state switch handlers.
 */

/*
 * Bring the radio up: wait for WPDMA, program the DMA rings, then let
 * the common rt2800 code enable the radio. Returns 0 or a negative
 * errno.
 */
int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	/* Wait for DMA, ignore error until we initialize queues. */
	rt2800_wait_wpdma_ready(rt2x00dev);

	if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
		return -EIO;

	return rt2800_enable_radio(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
MODULE_LICENSE("GPL");