Lines Matching +full:ctrl +full:-len (drivers/net/wireless/mediatek/mt76/sdio.c)
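
These matches cover the mt76 SDIO bus glue end to end: mailbox-based register access, hardware init, ring-buffer queue management, and the TX/RX worker paths, all built on the mt76s_* helpers. After each group of matches, a short plain-C sketch models the pattern the fragments exercise; the sketches use placeholder names and stubbed I/O, and are illustrative stand-ins rather than the driver's code.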

1 // SPDX-License-Identifier: ISC
25 return sdio_readl(dev->sdio.func, MCR_WHISR, NULL); in mt76s_read_whisr()
30 struct mt76_sdio *sdio = &dev->sdio; in mt76s_read_pcr()
32 return sdio_readl(sdio->func, MCR_WHLPCR, NULL); in mt76s_read_pcr()
38 struct sdio_func *func = dev->sdio.func; in mt76s_read_mailbox()
46 dev_err(dev->dev, "failed setting address [err=%d]\n", err); in mt76s_read_mailbox()
52 dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); in mt76s_read_mailbox()
59 dev_err(dev->dev, "query whisr timeout\n"); in mt76s_read_mailbox()
65 dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); in mt76s_read_mailbox()
71 dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); in mt76s_read_mailbox()
76 dev_err(dev->dev, "register mismatch\n"); in mt76s_read_mailbox()
83 dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err); in mt76s_read_mailbox()
93 struct sdio_func *func = dev->sdio.func; in mt76s_write_mailbox()
101 dev_err(dev->dev, "failed setting address [err=%d]\n", err); in mt76s_write_mailbox()
107 dev_err(dev->dev, in mt76s_write_mailbox()
114 dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); in mt76s_write_mailbox()
121 dev_err(dev->dev, "query whisr timeout\n"); in mt76s_write_mailbox()
127 dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); in mt76s_write_mailbox()
133 dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); in mt76s_write_mailbox()
138 dev_err(dev->dev, "register mismatch\n"); in mt76s_write_mailbox()
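
mt76s_read_mailbox() and mt76s_write_mailbox() share one handshake: program the target address into H2DSM0R, trigger a software interrupt toward the firmware, poll WHISR until it acknowledges (the "query whisr timeout" error path), re-read H2DSM0R and compare it against the requested address (the "register mismatch" path), and, for reads, fetch the reply from D2HRM1R. A minimal single-threaded model of the read flow, with the SDIO accessors replaced by a local register array and the interrupt bit names invented for illustration:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Local register file standing in for sdio_readl()/sdio_writel(). */
enum { H2DSM0R, D2HRM1R, WHISR, WSICR, NUM_REGS };
static uint32_t regs[NUM_REGS];

#define SW_INT_READ    0x1u   /* invented trigger bit */
#define D2H_SW_INT_ACK 0x1u   /* invented ack bit in WHISR */

static bool mailbox_read(uint32_t addr, uint32_t *val)
{
    regs[H2DSM0R] = addr;             /* "failed setting address" on error */
    regs[WSICR]   = SW_INT_READ;      /* "failed setting read mode" */
    regs[WHISR]  |= D2H_SW_INT_ACK;   /* model: firmware answers at once */
    regs[D2HRM1R] = addr ^ 0xdead;    /* model: firmware's reply */

    for (int tries = 0; !(regs[WHISR] & D2H_SW_INT_ACK); tries++)
        if (tries > 1000)
            return false;             /* "query whisr timeout" */

    if (regs[H2DSM0R] != addr)
        return false;                 /* "register mismatch" */

    *val = regs[D2HRM1R];             /* "failed reading d2hrm1r" on error */
    return true;
}

int main(void)
{
    uint32_t val;

    if (mailbox_read(0x1234, &val))
        printf("mailbox read -> 0x%x\n", val);
    return 0;
}

In the driver each step is an sdio_readl()/sdio_writel() whose err result feeds the dev_err() strings above.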
146 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) in mt76s_rr()
147 return dev->mcu_ops->mcu_rr(dev, offset); in mt76s_rr()
155 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) in mt76s_wr()
156 dev->mcu_ops->mcu_wr(dev, offset, val); in mt76s_wr()
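
mt76s_rr() and mt76s_wr() switch transport once MT76_STATE_MCU_RUNNING is set: after firmware boot, register accesses are routed through dev->mcu_ops and become mailbox transactions instead of direct SDIO reads and writes. The dispatch shape, sketched with simplified types:

#include <stdint.h>
#include <stdbool.h>

struct mcu_ops {
    uint32_t (*mcu_rr)(void *dev, uint32_t offset);
    void     (*mcu_wr)(void *dev, uint32_t offset, uint32_t val);
};

struct mdev {
    bool mcu_running;               /* models MT76_STATE_MCU_RUNNING */
    const struct mcu_ops *mcu_ops;  /* mailbox-backed accessors */
};

/* Direct path; a stub here, an SDIO register read in the driver. */
static uint32_t direct_rr(struct mdev *d, uint32_t offset)
{
    (void)d; (void)offset;
    return 0;
}

static uint32_t mdev_rr(struct mdev *d, uint32_t offset)
{
    if (d->mcu_running)
        return d->mcu_ops->mcu_rr(d, offset);
    return direct_rr(d, offset);
}

int main(void)
{
    struct mdev d = { .mcu_running = false, .mcu_ops = 0 };

    return (int)mdev_rr(&d, 0x4000);
}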
172 const void *data, int len) in mt76s_write_copy() argument
177 for (i = 0; i < len / sizeof(u32); i++) { in mt76s_write_copy()
185 void *data, int len) in mt76s_read_copy() argument
190 for (i = 0; i < len / sizeof(u32); i++) { in mt76s_read_copy()
199 int len) in mt76s_wr_rp() argument
203 for (i = 0; i < len; i++) { in mt76s_wr_rp()
204 mt76s_wr(dev, data->reg, data->value); in mt76s_wr_rp()
213 struct mt76_reg_pair *data, int len) in mt76s_rd_rp() argument
217 for (i = 0; i < len; i++) { in mt76s_rd_rp()
218 data->value = mt76s_rr(dev, data->reg); in mt76s_rd_rp()
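
mt76s_write_copy() and mt76s_read_copy() walk their buffer one u32 per iteration (len / sizeof(u32) rounds), while mt76s_wr_rp() and mt76s_rd_rp() interpret len as a count of register/value pairs, issuing one access per pair. A small model of the pair helpers, with the register backend faked by an array:

#include <stdint.h>
#include <stdio.h>

struct reg_pair { uint32_t reg; uint32_t value; };

static uint32_t fake_regs[16];
static void wr(uint32_t reg, uint32_t val) { fake_regs[reg & 15] = val; }
static uint32_t rr(uint32_t reg) { return fake_regs[reg & 15]; }

/* Write a batch of register/value pairs, one access per pair. */
static void wr_rp(struct reg_pair *data, int len)
{
    for (int i = 0; i < len; i++, data++)
        wr(data->reg, data->value);
}

/* Read back into the same pair layout. */
static void rd_rp(struct reg_pair *data, int len)
{
    for (int i = 0; i < len; i++, data++)
        data->value = rr(data->reg);
}

int main(void)
{
    struct reg_pair batch[] = { { 0, 0x11 }, { 1, 0x22 } };

    wr_rp(batch, 2);
    batch[0].value = batch[1].value = 0;
    rd_rp(batch, 2);
    printf("0x%x 0x%x\n", batch[0].value, batch[1].value); /* 0x11 0x22 */
    return 0;
}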
228 u32 status, ctrl; in mt76s_hw_init() local
231 dev->sdio.hw_ver = hw_ver; in mt76s_hw_init()
248 dev_err(dev->dev, "Cannot get ownership from device"); in mt76s_hw_init()
261 ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN; in mt76s_hw_init()
263 ctrl |= WHIER_RX1_DONE_INT_EN; in mt76s_hw_init()
264 sdio_writel(func, ctrl, MCR_WHIER, &ret); in mt76s_hw_init()
271 ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16); in mt76s_hw_init()
274 ctrl = sdio_readl(func, MCR_WHCR, &ret); in mt76s_hw_init()
277 ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2; in mt76s_hw_init()
278 ctrl &= ~W_INT_CLR_CTRL; /* read clear */ in mt76s_hw_init()
279 ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0); in mt76s_hw_init()
283 sdio_writel(func, ctrl, MCR_WHCR, &ret); in mt76s_hw_init()
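
mt76s_hw_init() builds the WHIER interrupt-enable word (RX0 and TX done always, RX1 only when a second RX queue exists) and then reprograms WHCR with a read-modify-write: clear the aggregation-length field and the interrupt-clear-control bit, then FIELD_PREP() the new length in. The same bitfield pattern with local stand-ins for the kernel's GENMASK()/FIELD_PREP() macros (field positions below are placeholders, not the real hardware layout; __builtin_ctz is a GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(mask, v) (((v) << __builtin_ctz(mask)) & (mask))

#define MAX_HIF_RX_LEN_NUM  GENMASK32(13, 8)   /* placeholder field position */
#define W_INT_CLR_CTRL      (1u << 1)          /* placeholder bit */

int main(void)
{
    uint32_t ctrl = 0xffffffffu;               /* pretend WHCR readback */

    ctrl &= ~MAX_HIF_RX_LEN_NUM;               /* wipe the old length field */
    ctrl &= ~W_INT_CLR_CTRL;                   /* read-clear interrupt mode */
    ctrl |= FIELD_PREP32(MAX_HIF_RX_LEN_NUM, 16); /* aggregate 16 frames */

    printf("WHCR = 0x%08x\n", ctrl);           /* would be written back */
    return 0;
}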
306 struct mt76_queue *q = &dev->q_rx[qid]; in mt76s_alloc_rx_queue()
308 spin_lock_init(&q->lock); in mt76s_alloc_rx_queue()
309 q->entry = devm_kcalloc(dev->dev, in mt76s_alloc_rx_queue()
310 MT76S_NUM_RX_ENTRIES, sizeof(*q->entry), in mt76s_alloc_rx_queue()
312 if (!q->entry) in mt76s_alloc_rx_queue()
313 return -ENOMEM; in mt76s_alloc_rx_queue()
315 q->ndesc = MT76S_NUM_RX_ENTRIES; in mt76s_alloc_rx_queue()
316 q->head = q->tail = 0; in mt76s_alloc_rx_queue()
317 q->queued = 0; in mt76s_alloc_rx_queue()
327 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); in mt76s_alloc_tx_queue()
329 return ERR_PTR(-ENOMEM); in mt76s_alloc_tx_queue()
331 spin_lock_init(&q->lock); in mt76s_alloc_tx_queue()
332 q->entry = devm_kcalloc(dev->dev, in mt76s_alloc_tx_queue()
333 MT76S_NUM_TX_ENTRIES, sizeof(*q->entry), in mt76s_alloc_tx_queue()
335 if (!q->entry) in mt76s_alloc_tx_queue()
336 return ERR_PTR(-ENOMEM); in mt76s_alloc_tx_queue()
338 q->ndesc = MT76S_NUM_TX_ENTRIES; in mt76s_alloc_tx_queue()
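
mt76s_alloc_rx_queue() and mt76s_alloc_tx_queue() build the same structure: a fixed-depth entry array (MT76S_NUM_RX_ENTRIES or MT76S_NUM_TX_ENTRIES deep), a spinlock, and head/tail/queued indices reset to zero; the TX variant also allocates the queue struct itself and returns ERR_PTR(-ENOMEM) on failure. A plain-C model of the ring setup (depth is arbitrary here, and plain calloc() stands in for the devm_* allocators):

#include <stdlib.h>

struct entry { void *skb; int buf_sz; };

struct queue {
    struct entry *entry;
    int ndesc;           /* ring depth */
    int head, tail;      /* producer / consumer indices */
    int queued;          /* entries currently in flight */
};

/* Allocate a fixed-depth ring; mirrors the devm_kcalloc() + index reset. */
static struct queue *alloc_queue(int ndesc)
{
    struct queue *q = calloc(1, sizeof(*q));

    if (!q)
        return NULL;
    q->entry = calloc(ndesc, sizeof(*q->entry));
    if (!q->entry) {
        free(q);
        return NULL;     /* the driver returns ERR_PTR(-ENOMEM) */
    }
    q->ndesc = ndesc;
    q->head = q->tail = q->queued = 0;
    return q;
}

int main(void)
{
    struct queue *q = alloc_queue(256);  /* MT76S_NUM_*_ENTRIES stand-in */

    return q ? 0 : 1;
}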
353 dev->phy.q_tx[i] = q; in mt76s_alloc_tx()
360 dev->q_mcu[MT_MCUQ_WM] = q; in mt76s_alloc_tx()
371 spin_lock_bh(&q->lock); in mt76s_get_next_rx_entry()
372 if (q->queued > 0) { in mt76s_get_next_rx_entry()
373 e = &q->entry[q->tail]; in mt76s_get_next_rx_entry()
374 q->tail = (q->tail + 1) % q->ndesc; in mt76s_get_next_rx_entry()
375 q->queued--; in mt76s_get_next_rx_entry()
377 spin_unlock_bh(&q->lock); in mt76s_get_next_rx_entry()
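
mt76s_get_next_rx_entry() is the consumer side of that ring: under the queue lock, take the entry at tail, advance tail modulo ndesc, and decrement queued. The same step, minus the locking:

#include <stddef.h>

struct entry { void *skb; };
struct queue { struct entry *entry; int ndesc, head, tail, queued; };

/* Pop the oldest entry, or NULL when the ring is empty; the driver does
 * this under spin_lock_bh(&q->lock). */
static struct entry *get_next_rx_entry(struct queue *q)
{
    struct entry *e = NULL;

    if (q->queued > 0) {
        e = &q->entry[q->tail];
        q->tail = (q->tail + 1) % q->ndesc;  /* wrap at ring depth */
        q->queued--;
    }
    return e;
}

int main(void)
{
    struct entry slots[4] = { 0 };
    struct queue q = { slots, 4, 1, 0, 1 };  /* one queued entry */

    return get_next_rx_entry(&q) ? 0 : 1;
}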
385 int qid = q - &dev->q_rx[MT_RXQ_MAIN]; in mt76s_process_rx_queue()
391 if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state)) in mt76s_process_rx_queue()
395 if (!e || !e->skb) in mt76s_process_rx_queue()
398 dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb, NULL); in mt76s_process_rx_queue()
399 e->skb = NULL; in mt76s_process_rx_queue()
422 nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]); in mt76s_net_worker()
438 mcu = q == dev->q_mcu[MT_MCUQ_WM]; in mt76s_process_tx_queue()
439 while (q->queued > 0) { in mt76s_process_tx_queue()
440 if (!q->entry[q->tail].done) in mt76s_process_tx_queue()
443 entry = q->entry[q->tail]; in mt76s_process_tx_queue()
444 q->entry[q->tail].done = false; in mt76s_process_tx_queue()
455 if (!q->queued) in mt76s_process_tx_queue()
456 wake_up(&dev->tx_wait); in mt76s_process_tx_queue()
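
mt76s_process_tx_queue() reaps completions strictly in order: it stops at the first entry whose done flag the completion path has not set, otherwise clears the flag, advances tail, and counts the frame; when the ring drains it wakes waiters on tx_wait. Modeled on the same ring shape:

#include <stdbool.h>

struct entry { void *skb; bool done; };
struct queue { struct entry *entry; int ndesc, head, tail, queued; };

/* Reap completed entries in FIFO order; returns frames reclaimed. */
static int process_tx_queue(struct queue *q)
{
    int nframes = 0;

    while (q->queued > 0) {
        if (!q->entry[q->tail].done)
            break;                      /* completions are strictly ordered */

        q->entry[q->tail].done = false;
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        nframes++;
    }
    /* driver: if (!q->queued) wake_up(&dev->tx_wait); */
    return nframes;
}

int main(void)
{
    struct entry slots[4] = { { 0, true }, { 0, true }, { 0, false } };
    struct queue q = { slots, 4, 3, 0, 3 };

    return process_tx_queue(&q) == 2 ? 0 : 1;
}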
472 nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]); in mt76s_status_worker()
476 dev->phy.q_tx[i]); in mt76s_status_worker()
481 if (dev->drv->tx_status_data && ndata_frames > 0 && in mt76s_status_worker()
482 !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) && in mt76s_status_worker()
483 !test_bit(MT76_STATE_SUSPEND, &dev->phy.state)) in mt76s_status_worker()
484 mt76_worker_schedule(&sdio->stat_worker); in mt76s_status_worker()
488 mt76_worker_schedule(&dev->tx_worker); in mt76s_status_worker()
502 if (test_bit(MT76_RESET, &dev->phy.state) || in mt76s_tx_status_data()
503 test_bit(MT76_REMOVED, &dev->phy.state)) in mt76s_tx_status_data()
506 if (!dev->drv->tx_status_data(dev, &update)) in mt76s_tx_status_data()
511 if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state)) in mt76s_tx_status_data()
512 mt76_worker_schedule(&sdio->status_worker); in mt76s_tx_status_data()
514 clear_bit(MT76_READING_STATS, &dev->phy.state); in mt76s_tx_status_data()
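
mt76s_tx_status_data() polls dev->drv->tx_status_data() until it reports no more work, then reschedules the status worker if anything was processed and the device is still running; the whole pass is guarded by test_and_set_bit(MT76_READING_STATS) so only one poll runs at a time. That single-entrant guard, approximated with C11 atomics (the driver sets the bit in mt76s_status_worker() before scheduling and clears it here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag reading_stats = ATOMIC_FLAG_INIT;

/* Stub: pretend three status updates are pending. */
static bool poll_status_once(void) { static int n = 3; return n-- > 0; }

static void tx_status_data(void)
{
    if (atomic_flag_test_and_set(&reading_stats))
        return;                         /* another pass already running */

    int count = 0;

    while (poll_status_once())          /* driver: drv->tx_status_data() */
        count++;

    printf("polled %d status updates\n", count);
    atomic_flag_clear(&reading_stats);  /* clear_bit(MT76_READING_STATS) */
}

int main(void)
{
    tx_status_data();
    return 0;
}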
525 struct mt76_dev *dev = phy->dev; in mt76s_tx_queue_skb()
526 int err, len = skb->len; in mt76s_tx_queue_skb() local
527 u16 idx = q->head; in mt76s_tx_queue_skb()
529 if (q->queued == q->ndesc) in mt76s_tx_queue_skb()
530 return -ENOSPC; in mt76s_tx_queue_skb()
532 skb->prev = skb->next = NULL; in mt76s_tx_queue_skb()
533 err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info); in mt76s_tx_queue_skb()
537 q->entry[q->head].skb = tx_info.skb; in mt76s_tx_queue_skb()
538 q->entry[q->head].buf_sz = len; in mt76s_tx_queue_skb()
539 q->entry[q->head].wcid = 0xffff; in mt76s_tx_queue_skb()
543 q->head = (q->head + 1) % q->ndesc; in mt76s_tx_queue_skb()
544 q->queued++; in mt76s_tx_queue_skb()
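
mt76s_tx_queue_skb() is the producer: refuse with -ENOSPC when queued == ndesc, let the driver's tx_prepare_skb() hook build the descriptor, then fill the slot at head (skb, buffer size, and a catch-all wcid of 0xffff) and advance head modulo ndesc. As a sketch with the prepare hook elided:

#include <errno.h>

struct entry { void *skb; int buf_sz; unsigned short wcid; };
struct queue { struct entry *entry; int ndesc, head, tail, queued; };

/* Enqueue one frame; -ENOSPC when the ring is full. */
static int queue_skb(struct queue *q, void *skb, int len)
{
    if (q->queued == q->ndesc)
        return -ENOSPC;

    q->entry[q->head].skb = skb;
    q->entry[q->head].buf_sz = len;
    q->entry[q->head].wcid = 0xffff;     /* as in the fragment above */

    q->head = (q->head + 1) % q->ndesc;
    q->queued++;
    return 0;
}

int main(void)
{
    struct entry slots[2] = { 0 };
    struct queue q = { slots, 2, 0, 0, 0 };
    int dummy;

    return queue_skb(&q, &dummy, 4) == 0 ? 0 : 1;
}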
553 int ret, len = skb->len, pad; in mt76s_tx_queue_skb_raw() local
555 pad = round_up(skb->len, 4) - skb->len; in mt76s_tx_queue_skb_raw()
560 spin_lock_bh(&q->lock); in mt76s_tx_queue_skb_raw()
562 if (q->queued == q->ndesc) { in mt76s_tx_queue_skb_raw()
563 ret = -ENOSPC; in mt76s_tx_queue_skb_raw()
564 spin_unlock_bh(&q->lock); in mt76s_tx_queue_skb_raw()
568 q->entry[q->head].buf_sz = len; in mt76s_tx_queue_skb_raw()
569 q->entry[q->head].skb = skb; in mt76s_tx_queue_skb_raw()
574 q->head = (q->head + 1) % q->ndesc; in mt76s_tx_queue_skb_raw()
575 q->queued++; in mt76s_tx_queue_skb_raw()
577 spin_unlock_bh(&q->lock); in mt76s_tx_queue_skb_raw()
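
mt76s_tx_queue_skb_raw() pads each frame to a 4-byte boundary before it is queued: pad = round_up(len, 4) - len, with round_up() being the kernel's power-of-two rounding macro. The arithmetic, spelled out:

#include <stdio.h>

/* Power-of-two round-up, like the kernel's round_up(x, 4). */
#define ROUND_UP4(x) (((x) + 3u) & ~3u)

int main(void)
{
    for (unsigned int len = 60; len <= 64; len++)
        printf("len=%u pad=%u\n", len, ROUND_UP4(len) - len);
    /* len=60 pad=0, len=61 pad=3, len=62 pad=2, len=63 pad=1, len=64 pad=0 */
    return 0;
}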
589 struct mt76_sdio *sdio = &dev->sdio; in mt76s_tx_kick()
591 mt76_worker_schedule(&sdio->txrx_worker); in mt76s_tx_kick()
602 struct mt76_sdio *sdio = &dev->sdio; in mt76s_deinit()
605 mt76_worker_teardown(&sdio->txrx_worker); in mt76s_deinit()
606 mt76_worker_teardown(&sdio->status_worker); in mt76s_deinit()
607 mt76_worker_teardown(&sdio->net_worker); in mt76s_deinit()
608 mt76_worker_teardown(&sdio->stat_worker); in mt76s_deinit()
610 clear_bit(MT76_READING_STATS, &dev->phy.state); in mt76s_deinit()
614 sdio_claim_host(sdio->func); in mt76s_deinit()
615 sdio_release_irq(sdio->func); in mt76s_deinit()
616 sdio_release_host(sdio->func); in mt76s_deinit()
619 struct mt76_queue *q = &dev->q_rx[i]; in mt76s_deinit()
622 for (j = 0; j < q->ndesc; j++) { in mt76s_deinit()
623 struct mt76_queue_entry *e = &q->entry[j]; in mt76s_deinit()
625 if (!e->skb) in mt76s_deinit()
628 dev_kfree_skb(e->skb); in mt76s_deinit()
629 e->skb = NULL; in mt76s_deinit()
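
Teardown order in mt76s_deinit() is deliberate: stop all four workers, clear MT76_READING_STATS, release the SDIO IRQ inside a sdio_claim_host()/sdio_release_host() pair, and only then sweep each RX ring to free any skb still parked in an entry. The final sweep, modeled with free() in place of dev_kfree_skb():

#include <stdlib.h>

struct entry { void *skb; };
struct queue { struct entry *entry; int ndesc; };

/* Free every buffer still sitting in the ring; runs only after the
 * workers and the SDIO IRQ are gone, so no one can refill it. */
static void drain_rx_queue(struct queue *q)
{
    for (int j = 0; j < q->ndesc; j++) {
        if (!q->entry[j].skb)
            continue;
        free(q->entry[j].skb);   /* driver: dev_kfree_skb(e->skb) */
        q->entry[j].skb = NULL;
    }
}

int main(void)
{
    struct entry slots[2] = { { malloc(16) }, { 0 } };
    struct queue q = { slots, 2 };

    drain_rx_queue(&q);
    return slots[0].skb ? 1 : 0;
}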
638 struct mt76_sdio *sdio = &dev->sdio; in mt76s_init()
642 err = mt76_worker_setup(dev->hw, &sdio->status_worker, in mt76s_init()
643 mt76s_status_worker, "sdio-status"); in mt76s_init()
647 err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker, in mt76s_init()
648 "sdio-net"); in mt76s_init()
652 err = mt76_worker_setup(dev->hw, &sdio->stat_worker, mt76s_tx_status_data, in mt76s_init()
653 "sdio-sta"); in mt76s_init()
657 sched_set_fifo_low(sdio->status_worker.task); in mt76s_init()
658 sched_set_fifo_low(sdio->net_worker.task); in mt76s_init()
659 sched_set_fifo_low(sdio->stat_worker.task); in mt76s_init()
661 dev->queue_ops = &sdio_queue_ops; in mt76s_init()
662 dev->bus = bus_ops; in mt76s_init()
663 dev->sdio.func = func; in mt76s_init()
665 host_max_cap = min_t(u32, func->card->host->max_req_size, in mt76s_init()
666 func->cur_blksize * in mt76s_init()
667 func->card->host->max_blk_count); in mt76s_init()
668 dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ); in mt76s_init()
669 dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz, in mt76s_init()
671 if (!dev->sdio.xmit_buf) in mt76s_init()
672 err = -ENOMEM; in mt76s_init()
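
mt76s_init() sizes the transmit staging buffer as the smallest of three limits: the host controller's max_req_size, cur_blksize * max_blk_count, and the driver's own MT76S_XMIT_BUF_SZ cap, mirroring the nested min_t() calls above. The clamp as plain C (the numbers are invented for illustration):

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
    /* Illustrative values only; the real ones come from the MMC host. */
    uint32_t max_req_size = 131072;
    uint32_t cur_blksize = 512, max_blk_count = 511;
    uint32_t xmit_buf_cap = 65536;      /* stands in for MT76S_XMIT_BUF_SZ */

    uint32_t host_max_cap = min_u32(max_req_size,
                                    cur_blksize * max_blk_count);
    uint32_t xmit_buf_sz = min_u32(host_max_cap, xmit_buf_cap);

    printf("xmit_buf_sz = %u\n", xmit_buf_sz);  /* 65536 here */
    return 0;
}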