16c92544dSBjoern A. Zeeb // SPDX-License-Identifier: ISC
26c92544dSBjoern A. Zeeb /*
36c92544dSBjoern A. Zeeb * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
46c92544dSBjoern A. Zeeb */
56c92544dSBjoern A. Zeeb
66c92544dSBjoern A. Zeeb #include <linux/module.h>
76c92544dSBjoern A. Zeeb #include "mt76.h"
86c92544dSBjoern A. Zeeb #include "usb_trace.h"
96c92544dSBjoern A. Zeeb #include "dma.h"
106c92544dSBjoern A. Zeeb
116c92544dSBjoern A. Zeeb #define MT_VEND_REQ_MAX_RETRY 10
126c92544dSBjoern A. Zeeb #define MT_VEND_REQ_TOUT_MS 300
136c92544dSBjoern A. Zeeb
146c92544dSBjoern A. Zeeb static bool disable_usb_sg;
156c92544dSBjoern A. Zeeb module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
166c92544dSBjoern A. Zeeb MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
176c92544dSBjoern A. Zeeb
/* Issue a synchronous vendor control request on endpoint 0, retrying
 * transient failures up to MT_VEND_REQ_MAX_RETRY times with a short
 * sleep between attempts.
 *
 * Caller must hold dev->usb.usb_ctrl_mtx (asserted below), since the
 * shared dev->usb.data bounce buffer is protected by it.
 *
 * Returns the number of bytes transferred on success, or a negative
 * errno. -ENODEV/-EPROTO are treated as "device gone": the MT76_REMOVED
 * bit is set and the error is returned without further retries.
 */
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	/* the direction bit in the request type selects the control pipe */
	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		/* bail out early if a previous request saw the device vanish */
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV || ret == -EPROTO)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV || ret == -EPROTO)
			return ret;
		usleep_range(5000, 10000);
	}

	/* all retries exhausted; report the last error */
	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(__mt76u_vendor_request);
486c92544dSBjoern A. Zeeb
/* Locked wrapper around __mt76u_vendor_request(): serializes on the USB
 * control mutex, performs the transfer and emits a register-write trace
 * event for the offset/value pair.
 */
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int err;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	err = __mt76u_vendor_request(dev, req, req_type, val, offset,
				     buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
646c92544dSBjoern A. Zeeb
/* Read a 32-bit register through a vendor control transfer.
 * Returns the register value, or ~0 when the transfer did not complete
 * with a full 32-bit word. Caller must hold the USB control mutex.
 */
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 val = ~0;
	int err;

	err = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
				     addr, usb->data, sizeof(__le32));
	if (err == sizeof(__le32))
		val = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, val);

	return val;
}
EXPORT_SYMBOL_GPL(___mt76u_rr);
806c92544dSBjoern A. Zeeb
/* Map the address-type bits onto the matching vendor read request and
 * perform the register read. Caller must hold the USB control mutex.
 */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 type = addr & MT_VEND_TYPE_MASK;
	u8 req;

	if (type == MT_VEND_TYPE_EEPROM)
		req = MT_VEND_READ_EEPROM;
	else if (type == MT_VEND_TYPE_CFG)
		req = MT_VEND_READ_CFG;
	else
		req = MT_VEND_MULTI_READ;

	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
			   addr & ~MT_VEND_TYPE_MASK);
}
1006c92544dSBjoern A. Zeeb
/* Locked register read used as the bus ops ->rr callback. */
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 val;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}
1116c92544dSBjoern A. Zeeb
/* Write a 32-bit register through a vendor control transfer, using the
 * shared bounce buffer. Caller must hold the USB control mutex.
 */
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val)
{
	put_unaligned_le32(val, dev->usb.data);
	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
			       addr, dev->usb.data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
EXPORT_SYMBOL_GPL(___mt76u_wr);
1236c92544dSBjoern A. Zeeb
/* Select the vendor write request from the address-type bits and issue
 * the register write. Caller must hold the USB control mutex.
 */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req = (addr & MT_VEND_TYPE_MASK) == MT_VEND_TYPE_CFG ?
		 MT_VEND_WRITE_CFG : MT_VEND_MULTI_WRITE;

	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
		    addr & ~MT_VEND_TYPE_MASK, val);
}
1396c92544dSBjoern A. Zeeb
/* Locked register write used as the bus ops ->wr callback. */
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
1466c92544dSBjoern A. Zeeb
/* Read-modify-write: clear @mask in the current register value, OR in
 * @val and write it back, all under one hold of the control mutex so the
 * sequence is atomic with respect to other register accessors.
 * Returns the value written.
 */
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	u32 cur;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	cur = __mt76u_rr(dev, addr) & ~mask;
	cur |= val;
	__mt76u_wr(dev, addr, cur);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return cur;
}
1576c92544dSBjoern A. Zeeb
/* Copy @len bytes of @data to device registers starting at @offset,
 * splitting the transfer into bounce-buffer sized vendor requests.
 * Stops early on the first transfer error.
 */
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *src = data;
	int pos, chunk, err;

	/* Always copy a multiple of 4 bytes, otherwise beacons can be
	 * corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	for (pos = 0; pos < len; pos += chunk) {
		chunk = min_t(int, usb->data_len, len - pos);
		memcpy(usb->data, src + pos, chunk);
		err = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + pos, usb->data,
					     chunk);
		if (err < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
1896c92544dSBjoern A. Zeeb
/* Read @len bytes from device address @offset into @data, splitting the
 * transfer into bounce-buffer sized vendor requests. The length is
 * rounded up to a 4-byte multiple; reading stops on the first error.
 */
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	u8 *dst = data;
	int pos, chunk, err;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	for (pos = 0; pos < len; pos += chunk) {
		chunk = min_t(int, usb->data_len, len - pos);
		err = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + pos) >> 16,
					     offset + pos, usb->data, chunk);
		if (err < 0)
			break;

		memcpy(dst + pos, usb->data, chunk);
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_read_copy);
2146c92544dSBjoern A. Zeeb
mt76u_single_wr(struct mt76_dev * dev,const u8 req,const u16 offset,const u32 val)2156c92544dSBjoern A. Zeeb void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
2166c92544dSBjoern A. Zeeb const u16 offset, const u32 val)
2176c92544dSBjoern A. Zeeb {
2186c92544dSBjoern A. Zeeb mutex_lock(&dev->usb.usb_ctrl_mtx);
2196c92544dSBjoern A. Zeeb __mt76u_vendor_request(dev, req,
2206c92544dSBjoern A. Zeeb USB_DIR_OUT | USB_TYPE_VENDOR,
2216c92544dSBjoern A. Zeeb val & 0xffff, offset, NULL, 0);
2226c92544dSBjoern A. Zeeb __mt76u_vendor_request(dev, req,
2236c92544dSBjoern A. Zeeb USB_DIR_OUT | USB_TYPE_VENDOR,
2246c92544dSBjoern A. Zeeb val >> 16, offset + 2, NULL, 0);
2256c92544dSBjoern A. Zeeb mutex_unlock(&dev->usb.usb_ctrl_mtx);
2266c92544dSBjoern A. Zeeb }
2276c92544dSBjoern A. Zeeb EXPORT_SYMBOL_GPL(mt76u_single_wr);
2286c92544dSBjoern A. Zeeb
2296c92544dSBjoern A. Zeeb static int
mt76u_req_wr_rp(struct mt76_dev * dev,u32 base,const struct mt76_reg_pair * data,int len)2306c92544dSBjoern A. Zeeb mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
2316c92544dSBjoern A. Zeeb const struct mt76_reg_pair *data, int len)
2326c92544dSBjoern A. Zeeb {
2336c92544dSBjoern A. Zeeb struct mt76_usb *usb = &dev->usb;
2346c92544dSBjoern A. Zeeb
2356c92544dSBjoern A. Zeeb mutex_lock(&usb->usb_ctrl_mtx);
2366c92544dSBjoern A. Zeeb while (len > 0) {
2376c92544dSBjoern A. Zeeb __mt76u_wr(dev, base + data->reg, data->value);
2386c92544dSBjoern A. Zeeb len--;
2396c92544dSBjoern A. Zeeb data++;
2406c92544dSBjoern A. Zeeb }
2416c92544dSBjoern A. Zeeb mutex_unlock(&usb->usb_ctrl_mtx);
2426c92544dSBjoern A. Zeeb
2436c92544dSBjoern A. Zeeb return 0;
2446c92544dSBjoern A. Zeeb }
2456c92544dSBjoern A. Zeeb
2466c92544dSBjoern A. Zeeb static int
mt76u_wr_rp(struct mt76_dev * dev,u32 base,const struct mt76_reg_pair * data,int n)2476c92544dSBjoern A. Zeeb mt76u_wr_rp(struct mt76_dev *dev, u32 base,
2486c92544dSBjoern A. Zeeb const struct mt76_reg_pair *data, int n)
2496c92544dSBjoern A. Zeeb {
2506c92544dSBjoern A. Zeeb if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
2516c92544dSBjoern A. Zeeb return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
2526c92544dSBjoern A. Zeeb else
2536c92544dSBjoern A. Zeeb return mt76u_req_wr_rp(dev, base, data, n);
2546c92544dSBjoern A. Zeeb }
2556c92544dSBjoern A. Zeeb
2566c92544dSBjoern A. Zeeb static int
mt76u_req_rd_rp(struct mt76_dev * dev,u32 base,struct mt76_reg_pair * data,int len)2576c92544dSBjoern A. Zeeb mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
2586c92544dSBjoern A. Zeeb int len)
2596c92544dSBjoern A. Zeeb {
2606c92544dSBjoern A. Zeeb struct mt76_usb *usb = &dev->usb;
2616c92544dSBjoern A. Zeeb
2626c92544dSBjoern A. Zeeb mutex_lock(&usb->usb_ctrl_mtx);
2636c92544dSBjoern A. Zeeb while (len > 0) {
2646c92544dSBjoern A. Zeeb data->value = __mt76u_rr(dev, base + data->reg);
2656c92544dSBjoern A. Zeeb len--;
2666c92544dSBjoern A. Zeeb data++;
2676c92544dSBjoern A. Zeeb }
2686c92544dSBjoern A. Zeeb mutex_unlock(&usb->usb_ctrl_mtx);
2696c92544dSBjoern A. Zeeb
2706c92544dSBjoern A. Zeeb return 0;
2716c92544dSBjoern A. Zeeb }
2726c92544dSBjoern A. Zeeb
2736c92544dSBjoern A. Zeeb static int
mt76u_rd_rp(struct mt76_dev * dev,u32 base,struct mt76_reg_pair * data,int n)2746c92544dSBjoern A. Zeeb mt76u_rd_rp(struct mt76_dev *dev, u32 base,
2756c92544dSBjoern A. Zeeb struct mt76_reg_pair *data, int n)
2766c92544dSBjoern A. Zeeb {
2776c92544dSBjoern A. Zeeb if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
2786c92544dSBjoern A. Zeeb return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
2796c92544dSBjoern A. Zeeb else
2806c92544dSBjoern A. Zeeb return mt76u_req_rd_rp(dev, base, data, n);
2816c92544dSBjoern A. Zeeb }
2826c92544dSBjoern A. Zeeb
mt76u_check_sg(struct mt76_dev * dev)2836c92544dSBjoern A. Zeeb static bool mt76u_check_sg(struct mt76_dev *dev)
2846c92544dSBjoern A. Zeeb {
2856c92544dSBjoern A. Zeeb struct usb_interface *uintf = to_usb_interface(dev->dev);
2866c92544dSBjoern A. Zeeb struct usb_device *udev = interface_to_usbdev(uintf);
2876c92544dSBjoern A. Zeeb
2886c92544dSBjoern A. Zeeb return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
289*8ba4d145SBjoern A. Zeeb udev->bus->no_sg_constraint);
2906c92544dSBjoern A. Zeeb }
2916c92544dSBjoern A. Zeeb
2926c92544dSBjoern A. Zeeb static int
mt76u_set_endpoints(struct usb_interface * intf,struct mt76_usb * usb)2936c92544dSBjoern A. Zeeb mt76u_set_endpoints(struct usb_interface *intf,
2946c92544dSBjoern A. Zeeb struct mt76_usb *usb)
2956c92544dSBjoern A. Zeeb {
2966c92544dSBjoern A. Zeeb struct usb_host_interface *intf_desc = intf->cur_altsetting;
2976c92544dSBjoern A. Zeeb struct usb_endpoint_descriptor *ep_desc;
2986c92544dSBjoern A. Zeeb int i, in_ep = 0, out_ep = 0;
2996c92544dSBjoern A. Zeeb
3006c92544dSBjoern A. Zeeb for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
3016c92544dSBjoern A. Zeeb ep_desc = &intf_desc->endpoint[i].desc;
3026c92544dSBjoern A. Zeeb
3036c92544dSBjoern A. Zeeb if (usb_endpoint_is_bulk_in(ep_desc) &&
3046c92544dSBjoern A. Zeeb in_ep < __MT_EP_IN_MAX) {
3056c92544dSBjoern A. Zeeb usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
3066c92544dSBjoern A. Zeeb in_ep++;
3076c92544dSBjoern A. Zeeb } else if (usb_endpoint_is_bulk_out(ep_desc) &&
3086c92544dSBjoern A. Zeeb out_ep < __MT_EP_OUT_MAX) {
3096c92544dSBjoern A. Zeeb usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
3106c92544dSBjoern A. Zeeb out_ep++;
3116c92544dSBjoern A. Zeeb }
3126c92544dSBjoern A. Zeeb }
3136c92544dSBjoern A. Zeeb
3146c92544dSBjoern A. Zeeb if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
3156c92544dSBjoern A. Zeeb return -EINVAL;
3166c92544dSBjoern A. Zeeb return 0;
3176c92544dSBjoern A. Zeeb }
3186c92544dSBjoern A. Zeeb
/* Populate the first @nsgs scatterlist entries of an rx URB with fresh
 * page-pool buffers of q->buf_size bytes each.
 *
 * On a partial fill (allocation failure at entry i < nsgs) the sg
 * entries from @nsgs up to the URB's previous num_sgs are released back
 * to the page pool and num_sgs is shrunk accordingly.
 * NOTE(review): entries [i, nsgs) are not freed here — presumably they
 * still hold buffers from a previous fill; confirm against callers.
 *
 * Returns the number of entries filled, or -ENOMEM if none could be.
 */
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		void *data;
		int offset;

		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!data)
			break;

		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
			    offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
		urb->num_sgs = i;
	}

	/* keep any previously valid trailing entries when the fill shrank */
	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
3516c92544dSBjoern A. Zeeb
3526c92544dSBjoern A. Zeeb static int
mt76u_refill_rx(struct mt76_dev * dev,struct mt76_queue * q,struct urb * urb,int nsgs)3536c92544dSBjoern A. Zeeb mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
354cbb3ec25SBjoern A. Zeeb struct urb *urb, int nsgs)
3556c92544dSBjoern A. Zeeb {
3566c92544dSBjoern A. Zeeb enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
357cbb3ec25SBjoern A. Zeeb int offset;
3586c92544dSBjoern A. Zeeb
3596c92544dSBjoern A. Zeeb if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
360cbb3ec25SBjoern A. Zeeb return mt76u_fill_rx_sg(dev, q, urb, nsgs);
3616c92544dSBjoern A. Zeeb
3626c92544dSBjoern A. Zeeb urb->transfer_buffer_length = q->buf_size;
363cbb3ec25SBjoern A. Zeeb urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
3646c92544dSBjoern A. Zeeb
3656c92544dSBjoern A. Zeeb return urb->transfer_buffer ? 0 : -ENOMEM;
3666c92544dSBjoern A. Zeeb }
3676c92544dSBjoern A. Zeeb
3686c92544dSBjoern A. Zeeb static int
mt76u_urb_alloc(struct mt76_dev * dev,struct mt76_queue_entry * e,int sg_max_size)3696c92544dSBjoern A. Zeeb mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
3706c92544dSBjoern A. Zeeb int sg_max_size)
3716c92544dSBjoern A. Zeeb {
3726c92544dSBjoern A. Zeeb unsigned int size = sizeof(struct urb);
3736c92544dSBjoern A. Zeeb
3746c92544dSBjoern A. Zeeb if (dev->usb.sg_en)
3756c92544dSBjoern A. Zeeb size += sg_max_size * sizeof(struct scatterlist);
3766c92544dSBjoern A. Zeeb
3776c92544dSBjoern A. Zeeb e->urb = kzalloc(size, GFP_KERNEL);
3786c92544dSBjoern A. Zeeb if (!e->urb)
3796c92544dSBjoern A. Zeeb return -ENOMEM;
3806c92544dSBjoern A. Zeeb
3816c92544dSBjoern A. Zeeb usb_init_urb(e->urb);
3826c92544dSBjoern A. Zeeb
3836c92544dSBjoern A. Zeeb if (dev->usb.sg_en && sg_max_size > 0)
3846c92544dSBjoern A. Zeeb e->urb->sg = (struct scatterlist *)(e->urb + 1);
3856c92544dSBjoern A. Zeeb
3866c92544dSBjoern A. Zeeb return 0;
3876c92544dSBjoern A. Zeeb }
3886c92544dSBjoern A. Zeeb
3896c92544dSBjoern A. Zeeb static int
mt76u_rx_urb_alloc(struct mt76_dev * dev,struct mt76_queue * q,struct mt76_queue_entry * e)3906c92544dSBjoern A. Zeeb mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
3916c92544dSBjoern A. Zeeb struct mt76_queue_entry *e)
3926c92544dSBjoern A. Zeeb {
3936c92544dSBjoern A. Zeeb enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
3946c92544dSBjoern A. Zeeb int err, sg_size;
3956c92544dSBjoern A. Zeeb
3966c92544dSBjoern A. Zeeb sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
3976c92544dSBjoern A. Zeeb err = mt76u_urb_alloc(dev, e, sg_size);
3986c92544dSBjoern A. Zeeb if (err)
3996c92544dSBjoern A. Zeeb return err;
4006c92544dSBjoern A. Zeeb
401cbb3ec25SBjoern A. Zeeb return mt76u_refill_rx(dev, q, e->urb, sg_size);
4026c92544dSBjoern A. Zeeb }
4036c92544dSBjoern A. Zeeb
/* Release an rx URB: return every attached page-pool buffer (both the
 * scatterlist entries and a plain transfer buffer, whichever is in use)
 * and free the URB itself.
 */
static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);

	if (urb->transfer_buffer)
		mt76_put_page_pool_buf(urb->transfer_buffer, false);

	usb_free_urb(urb);
}
4166c92544dSBjoern A. Zeeb
4176c92544dSBjoern A. Zeeb static void
mt76u_fill_bulk_urb(struct mt76_dev * dev,int dir,int index,struct urb * urb,usb_complete_t complete_fn,void * context)4186c92544dSBjoern A. Zeeb mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
4196c92544dSBjoern A. Zeeb struct urb *urb, usb_complete_t complete_fn,
4206c92544dSBjoern A. Zeeb void *context)
4216c92544dSBjoern A. Zeeb {
4226c92544dSBjoern A. Zeeb struct usb_interface *uintf = to_usb_interface(dev->dev);
4236c92544dSBjoern A. Zeeb struct usb_device *udev = interface_to_usbdev(uintf);
4246c92544dSBjoern A. Zeeb unsigned int pipe;
4256c92544dSBjoern A. Zeeb
4266c92544dSBjoern A. Zeeb if (dir == USB_DIR_IN)
4276c92544dSBjoern A. Zeeb pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
4286c92544dSBjoern A. Zeeb else
4296c92544dSBjoern A. Zeeb pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
4306c92544dSBjoern A. Zeeb
4316c92544dSBjoern A. Zeeb urb->dev = udev;
4326c92544dSBjoern A. Zeeb urb->pipe = pipe;
4336c92544dSBjoern A. Zeeb urb->complete = complete_fn;
4346c92544dSBjoern A. Zeeb urb->context = context;
4356c92544dSBjoern A. Zeeb }
4366c92544dSBjoern A. Zeeb
4376c92544dSBjoern A. Zeeb static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue * q)4386c92544dSBjoern A. Zeeb mt76u_get_next_rx_entry(struct mt76_queue *q)
4396c92544dSBjoern A. Zeeb {
4406c92544dSBjoern A. Zeeb struct urb *urb = NULL;
4416c92544dSBjoern A. Zeeb unsigned long flags;
4426c92544dSBjoern A. Zeeb
4436c92544dSBjoern A. Zeeb spin_lock_irqsave(&q->lock, flags);
4446c92544dSBjoern A. Zeeb if (q->queued > 0) {
4456c92544dSBjoern A. Zeeb urb = q->entry[q->tail].urb;
4466c92544dSBjoern A. Zeeb q->tail = (q->tail + 1) % q->ndesc;
4476c92544dSBjoern A. Zeeb q->queued--;
4486c92544dSBjoern A. Zeeb }
4496c92544dSBjoern A. Zeeb spin_unlock_irqrestore(&q->lock, flags);
4506c92544dSBjoern A. Zeeb
4516c92544dSBjoern A. Zeeb return urb;
4526c92544dSBjoern A. Zeeb }
4536c92544dSBjoern A. Zeeb
/* Extract the DMA payload length from the first 16 bits of an rx buffer.
 *
 * Drivers flagged MT_DRV_RX_DMA_HDR return the raw length unchecked;
 * otherwise the length is validated against the amount of data actually
 * received. Returns the length, or -EINVAL on a malformed header.
 */
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	/* smallest valid frame: DMA header + rxwi + FCE info */
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	/* reject truncated buffers, zero lengths, lengths exceeding the
	 * received data, and lengths that are not 4-byte aligned
	 */
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}
4716c92544dSBjoern A. Zeeb
/* Build an sk_buff around the first rx buffer segment.
 *
 * Fast path: when @len plus the optional DMA header fits into the buffer
 * alongside skb_shared_info, wrap the buffer in place with build_skb().
 * Slow path: otherwise allocate a small linear skb, copy the first
 * MT_SKB_HEAD_LEN bytes into it and attach the remainder of the buffer
 * as a page fragment. Returns NULL on allocation failure.
 */
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	/* MT_DRV_RX_DMA_HDR drivers have no DMA header to strip */
	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		/* remainder of the buffer becomes a paged fragment */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}
5106c92544dSBjoern A. Zeeb
/* Turn a completed rx URB into an sk_buff and hand it to the driver.
 *
 * The first segment (sg[0] or the plain transfer buffer) carries the
 * DMA header and the start of the frame; any remaining frame bytes are
 * attached from subsequent sg entries as page fragments.
 *
 * Returns the number of buffer segments consumed (to be refilled by the
 * caller), or 0 when the frame was dropped or the device is not yet
 * initialized.
 */
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	/* bytes of the frame available in the first segment */
	data_len = min_t(int, len, data_len - head_room);

	/* rx_check only applies when the whole frame fits one segment */
	if (len == data_len &&
	    dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
		return 0;

	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	/* append the rest of the frame from the remaining sg entries */
	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}

	/* pages come from a page pool; let the stack recycle them */
	skb_mark_for_recycle(skb);
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);

	return nsgs;
}
5546c92544dSBjoern A. Zeeb
/* URB completion callback for rx bulk transfers (interrupt context).
 *
 * Fatal/unlink statuses are dropped silently; any other error is logged
 * and the URB is still queued for processing (fallthrough to the
 * success path) so the buffer gets recycled. On success the ring head is
 * advanced and the rx worker is kicked.
 */
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		/* URB unlinked or device gone: do not resubmit */
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	/* completions must arrive in submission order */
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	mt76_worker_schedule(&dev->usb.rx_worker);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}
5876c92544dSBjoern A. Zeeb
5886c92544dSBjoern A. Zeeb static int
mt76u_submit_rx_buf(struct mt76_dev * dev,enum mt76_rxq_id qid,struct urb * urb)5896c92544dSBjoern A. Zeeb mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
5906c92544dSBjoern A. Zeeb struct urb *urb)
5916c92544dSBjoern A. Zeeb {
5926c92544dSBjoern A. Zeeb int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
5936c92544dSBjoern A. Zeeb
5946c92544dSBjoern A. Zeeb mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
5956c92544dSBjoern A. Zeeb mt76u_complete_rx, &dev->q_rx[qid]);
5966c92544dSBjoern A. Zeeb trace_submit_urb(dev, urb);
5976c92544dSBjoern A. Zeeb
5986c92544dSBjoern A. Zeeb return usb_submit_urb(urb, GFP_ATOMIC);
5996c92544dSBjoern A. Zeeb }
6006c92544dSBjoern A. Zeeb
/* Drain all completed URBs of one rx queue: process each entry, refill
 * the buffer segments it consumed and resubmit the URB. Refill failure
 * stops the loop, leaving that URB unsubmitted.
 * For the main queue, notify the rx path once the batch is done.
 */
static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			/* replace the buffers handed to the network stack */
			err = mt76u_refill_rx(dev, q, urb, count);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN) {
		/* rx completion path expects BH context */
		local_bh_disable();
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
		local_bh_enable();
	}
}
6276c92544dSBjoern A. Zeeb
mt76u_rx_worker(struct mt76_worker * w)6286c92544dSBjoern A. Zeeb static void mt76u_rx_worker(struct mt76_worker *w)
6296c92544dSBjoern A. Zeeb {
6306c92544dSBjoern A. Zeeb struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
6316c92544dSBjoern A. Zeeb struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
6326c92544dSBjoern A. Zeeb int i;
6336c92544dSBjoern A. Zeeb
6346c92544dSBjoern A. Zeeb rcu_read_lock();
6356c92544dSBjoern A. Zeeb mt76_for_each_q_rx(dev, i)
6366c92544dSBjoern A. Zeeb mt76u_process_rx_queue(dev, &dev->q_rx[i]);
6376c92544dSBjoern A. Zeeb rcu_read_unlock();
6386c92544dSBjoern A. Zeeb }
6396c92544dSBjoern A. Zeeb
6406c92544dSBjoern A. Zeeb static int
mt76u_submit_rx_buffers(struct mt76_dev * dev,enum mt76_rxq_id qid)6416c92544dSBjoern A. Zeeb mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
6426c92544dSBjoern A. Zeeb {
6436c92544dSBjoern A. Zeeb struct mt76_queue *q = &dev->q_rx[qid];
6446c92544dSBjoern A. Zeeb unsigned long flags;
6456c92544dSBjoern A. Zeeb int i, err = 0;
6466c92544dSBjoern A. Zeeb
6476c92544dSBjoern A. Zeeb spin_lock_irqsave(&q->lock, flags);
6486c92544dSBjoern A. Zeeb for (i = 0; i < q->ndesc; i++) {
6496c92544dSBjoern A. Zeeb err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
6506c92544dSBjoern A. Zeeb if (err < 0)
6516c92544dSBjoern A. Zeeb break;
6526c92544dSBjoern A. Zeeb }
6536c92544dSBjoern A. Zeeb q->head = q->tail = 0;
6546c92544dSBjoern A. Zeeb q->queued = 0;
6556c92544dSBjoern A. Zeeb spin_unlock_irqrestore(&q->lock, flags);
6566c92544dSBjoern A. Zeeb
6576c92544dSBjoern A. Zeeb return err;
6586c92544dSBjoern A. Zeeb }
6596c92544dSBjoern A. Zeeb
6606c92544dSBjoern A. Zeeb static int
mt76u_alloc_rx_queue(struct mt76_dev * dev,enum mt76_rxq_id qid)6616c92544dSBjoern A. Zeeb mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
6626c92544dSBjoern A. Zeeb {
6636c92544dSBjoern A. Zeeb struct mt76_queue *q = &dev->q_rx[qid];
6646c92544dSBjoern A. Zeeb int i, err;
6656c92544dSBjoern A. Zeeb
666cbb3ec25SBjoern A. Zeeb err = mt76_create_page_pool(dev, q);
667cbb3ec25SBjoern A. Zeeb if (err)
668cbb3ec25SBjoern A. Zeeb return err;
669cbb3ec25SBjoern A. Zeeb
6706c92544dSBjoern A. Zeeb spin_lock_init(&q->lock);
6716c92544dSBjoern A. Zeeb q->entry = devm_kcalloc(dev->dev,
6726c92544dSBjoern A. Zeeb MT_NUM_RX_ENTRIES, sizeof(*q->entry),
6736c92544dSBjoern A. Zeeb GFP_KERNEL);
6746c92544dSBjoern A. Zeeb if (!q->entry)
6756c92544dSBjoern A. Zeeb return -ENOMEM;
6766c92544dSBjoern A. Zeeb
6776c92544dSBjoern A. Zeeb q->ndesc = MT_NUM_RX_ENTRIES;
6786c92544dSBjoern A. Zeeb q->buf_size = PAGE_SIZE;
6796c92544dSBjoern A. Zeeb
6806c92544dSBjoern A. Zeeb for (i = 0; i < q->ndesc; i++) {
6816c92544dSBjoern A. Zeeb err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
6826c92544dSBjoern A. Zeeb if (err < 0)
6836c92544dSBjoern A. Zeeb return err;
6846c92544dSBjoern A. Zeeb }
6856c92544dSBjoern A. Zeeb
6866c92544dSBjoern A. Zeeb return mt76u_submit_rx_buffers(dev, qid);
6876c92544dSBjoern A. Zeeb }
6886c92544dSBjoern A. Zeeb
/* Allocate and arm the MCU command/response rx queue (MT_RXQ_MCU). */
int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
6946c92544dSBjoern A. Zeeb
6956c92544dSBjoern A. Zeeb static void
mt76u_free_rx_queue(struct mt76_dev * dev,struct mt76_queue * q)6966c92544dSBjoern A. Zeeb mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
6976c92544dSBjoern A. Zeeb {
6986c92544dSBjoern A. Zeeb int i;
6996c92544dSBjoern A. Zeeb
7006c92544dSBjoern A. Zeeb for (i = 0; i < q->ndesc; i++) {
7016c92544dSBjoern A. Zeeb if (!q->entry[i].urb)
7026c92544dSBjoern A. Zeeb continue;
7036c92544dSBjoern A. Zeeb
7046c92544dSBjoern A. Zeeb mt76u_urb_free(q->entry[i].urb);
7056c92544dSBjoern A. Zeeb q->entry[i].urb = NULL;
7066c92544dSBjoern A. Zeeb }
707cbb3ec25SBjoern A. Zeeb page_pool_destroy(q->page_pool);
708cbb3ec25SBjoern A. Zeeb q->page_pool = NULL;
7096c92544dSBjoern A. Zeeb }
7106c92544dSBjoern A. Zeeb
mt76u_free_rx(struct mt76_dev * dev)7116c92544dSBjoern A. Zeeb static void mt76u_free_rx(struct mt76_dev *dev)
7126c92544dSBjoern A. Zeeb {
7136c92544dSBjoern A. Zeeb int i;
7146c92544dSBjoern A. Zeeb
7156c92544dSBjoern A. Zeeb mt76_worker_teardown(&dev->usb.rx_worker);
7166c92544dSBjoern A. Zeeb
7176c92544dSBjoern A. Zeeb mt76_for_each_q_rx(dev, i)
7186c92544dSBjoern A. Zeeb mt76u_free_rx_queue(dev, &dev->q_rx[i]);
7196c92544dSBjoern A. Zeeb }
7206c92544dSBjoern A. Zeeb
mt76u_stop_rx(struct mt76_dev * dev)7216c92544dSBjoern A. Zeeb void mt76u_stop_rx(struct mt76_dev *dev)
7226c92544dSBjoern A. Zeeb {
7236c92544dSBjoern A. Zeeb int i;
7246c92544dSBjoern A. Zeeb
7256c92544dSBjoern A. Zeeb mt76_worker_disable(&dev->usb.rx_worker);
7266c92544dSBjoern A. Zeeb
7276c92544dSBjoern A. Zeeb mt76_for_each_q_rx(dev, i) {
7286c92544dSBjoern A. Zeeb struct mt76_queue *q = &dev->q_rx[i];
7296c92544dSBjoern A. Zeeb int j;
7306c92544dSBjoern A. Zeeb
7316c92544dSBjoern A. Zeeb for (j = 0; j < q->ndesc; j++)
7326c92544dSBjoern A. Zeeb usb_poison_urb(q->entry[j].urb);
7336c92544dSBjoern A. Zeeb }
7346c92544dSBjoern A. Zeeb }
7356c92544dSBjoern A. Zeeb EXPORT_SYMBOL_GPL(mt76u_stop_rx);
7366c92544dSBjoern A. Zeeb
mt76u_resume_rx(struct mt76_dev * dev)7376c92544dSBjoern A. Zeeb int mt76u_resume_rx(struct mt76_dev *dev)
7386c92544dSBjoern A. Zeeb {
7396c92544dSBjoern A. Zeeb int i;
7406c92544dSBjoern A. Zeeb
7416c92544dSBjoern A. Zeeb mt76_for_each_q_rx(dev, i) {
7426c92544dSBjoern A. Zeeb struct mt76_queue *q = &dev->q_rx[i];
7436c92544dSBjoern A. Zeeb int err, j;
7446c92544dSBjoern A. Zeeb
7456c92544dSBjoern A. Zeeb for (j = 0; j < q->ndesc; j++)
7466c92544dSBjoern A. Zeeb usb_unpoison_urb(q->entry[j].urb);
7476c92544dSBjoern A. Zeeb
7486c92544dSBjoern A. Zeeb err = mt76u_submit_rx_buffers(dev, i);
7496c92544dSBjoern A. Zeeb if (err < 0)
7506c92544dSBjoern A. Zeeb return err;
7516c92544dSBjoern A. Zeeb }
7526c92544dSBjoern A. Zeeb
7536c92544dSBjoern A. Zeeb mt76_worker_enable(&dev->usb.rx_worker);
7546c92544dSBjoern A. Zeeb
7556c92544dSBjoern A. Zeeb return 0;
7566c92544dSBjoern A. Zeeb }
7576c92544dSBjoern A. Zeeb EXPORT_SYMBOL_GPL(mt76u_resume_rx);
7586c92544dSBjoern A. Zeeb
mt76u_status_worker(struct mt76_worker * w)7596c92544dSBjoern A. Zeeb static void mt76u_status_worker(struct mt76_worker *w)
7606c92544dSBjoern A. Zeeb {
7616c92544dSBjoern A. Zeeb struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
7626c92544dSBjoern A. Zeeb struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
7636c92544dSBjoern A. Zeeb struct mt76_queue_entry entry;
7646c92544dSBjoern A. Zeeb struct mt76_queue *q;
7656c92544dSBjoern A. Zeeb int i;
7666c92544dSBjoern A. Zeeb
767cbb3ec25SBjoern A. Zeeb if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
768cbb3ec25SBjoern A. Zeeb return;
769cbb3ec25SBjoern A. Zeeb
770*8ba4d145SBjoern A. Zeeb for (i = 0; i <= MT_TXQ_PSD; i++) {
7716c92544dSBjoern A. Zeeb q = dev->phy.q_tx[i];
7726c92544dSBjoern A. Zeeb if (!q)
7736c92544dSBjoern A. Zeeb continue;
7746c92544dSBjoern A. Zeeb
7756c92544dSBjoern A. Zeeb while (q->queued > 0) {
7766c92544dSBjoern A. Zeeb if (!q->entry[q->tail].done)
7776c92544dSBjoern A. Zeeb break;
7786c92544dSBjoern A. Zeeb
7796c92544dSBjoern A. Zeeb entry = q->entry[q->tail];
7806c92544dSBjoern A. Zeeb q->entry[q->tail].done = false;
7816c92544dSBjoern A. Zeeb
7826c92544dSBjoern A. Zeeb mt76_queue_tx_complete(dev, q, &entry);
7836c92544dSBjoern A. Zeeb }
7846c92544dSBjoern A. Zeeb
7856c92544dSBjoern A. Zeeb if (!q->queued)
7866c92544dSBjoern A. Zeeb wake_up(&dev->tx_wait);
7876c92544dSBjoern A. Zeeb
7886c92544dSBjoern A. Zeeb mt76_worker_schedule(&dev->tx_worker);
789cbb3ec25SBjoern A. Zeeb }
7906c92544dSBjoern A. Zeeb
7916c92544dSBjoern A. Zeeb if (dev->drv->tx_status_data &&
7926c92544dSBjoern A. Zeeb !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
7936c92544dSBjoern A. Zeeb queue_work(dev->wq, &dev->usb.stat_work);
7946c92544dSBjoern A. Zeeb }
7956c92544dSBjoern A. Zeeb
mt76u_tx_status_data(struct work_struct * work)7966c92544dSBjoern A. Zeeb static void mt76u_tx_status_data(struct work_struct *work)
7976c92544dSBjoern A. Zeeb {
7986c92544dSBjoern A. Zeeb struct mt76_usb *usb;
7996c92544dSBjoern A. Zeeb struct mt76_dev *dev;
8006c92544dSBjoern A. Zeeb u8 update = 1;
8016c92544dSBjoern A. Zeeb u16 count = 0;
8026c92544dSBjoern A. Zeeb
8036c92544dSBjoern A. Zeeb usb = container_of(work, struct mt76_usb, stat_work);
8046c92544dSBjoern A. Zeeb dev = container_of(usb, struct mt76_dev, usb);
8056c92544dSBjoern A. Zeeb
8066c92544dSBjoern A. Zeeb while (true) {
8076c92544dSBjoern A. Zeeb if (test_bit(MT76_REMOVED, &dev->phy.state))
8086c92544dSBjoern A. Zeeb break;
8096c92544dSBjoern A. Zeeb
8106c92544dSBjoern A. Zeeb if (!dev->drv->tx_status_data(dev, &update))
8116c92544dSBjoern A. Zeeb break;
8126c92544dSBjoern A. Zeeb count++;
8136c92544dSBjoern A. Zeeb }
8146c92544dSBjoern A. Zeeb
8156c92544dSBjoern A. Zeeb if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
8166c92544dSBjoern A. Zeeb queue_work(dev->wq, &usb->stat_work);
8176c92544dSBjoern A. Zeeb else
8186c92544dSBjoern A. Zeeb clear_bit(MT76_READING_STATS, &dev->phy.state);
8196c92544dSBjoern A. Zeeb }
8206c92544dSBjoern A. Zeeb
mt76u_complete_tx(struct urb * urb)8216c92544dSBjoern A. Zeeb static void mt76u_complete_tx(struct urb *urb)
8226c92544dSBjoern A. Zeeb {
8236c92544dSBjoern A. Zeeb struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
8246c92544dSBjoern A. Zeeb struct mt76_queue_entry *e = urb->context;
8256c92544dSBjoern A. Zeeb
8266c92544dSBjoern A. Zeeb if (mt76u_urb_error(urb))
8276c92544dSBjoern A. Zeeb dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
8286c92544dSBjoern A. Zeeb e->done = true;
8296c92544dSBjoern A. Zeeb
8306c92544dSBjoern A. Zeeb mt76_worker_schedule(&dev->usb.status_worker);
8316c92544dSBjoern A. Zeeb }
8326c92544dSBjoern A. Zeeb
8336c92544dSBjoern A. Zeeb static int
mt76u_tx_setup_buffers(struct mt76_dev * dev,struct sk_buff * skb,struct urb * urb)8346c92544dSBjoern A. Zeeb mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
8356c92544dSBjoern A. Zeeb struct urb *urb)
8366c92544dSBjoern A. Zeeb {
8376c92544dSBjoern A. Zeeb urb->transfer_buffer_length = skb->len;
8386c92544dSBjoern A. Zeeb
8396c92544dSBjoern A. Zeeb if (!dev->usb.sg_en) {
8406c92544dSBjoern A. Zeeb urb->transfer_buffer = skb->data;
8416c92544dSBjoern A. Zeeb return 0;
8426c92544dSBjoern A. Zeeb }
8436c92544dSBjoern A. Zeeb
8446c92544dSBjoern A. Zeeb sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
8456c92544dSBjoern A. Zeeb urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
8466c92544dSBjoern A. Zeeb if (!urb->num_sgs)
8476c92544dSBjoern A. Zeeb return -ENOMEM;
8486c92544dSBjoern A. Zeeb
8496c92544dSBjoern A. Zeeb return urb->num_sgs;
8506c92544dSBjoern A. Zeeb }
8516c92544dSBjoern A. Zeeb
8526c92544dSBjoern A. Zeeb static int
mt76u_tx_queue_skb(struct mt76_phy * phy,struct mt76_queue * q,enum mt76_txq_id qid,struct sk_buff * skb,struct mt76_wcid * wcid,struct ieee80211_sta * sta)853*8ba4d145SBjoern A. Zeeb mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
8546c92544dSBjoern A. Zeeb enum mt76_txq_id qid, struct sk_buff *skb,
8556c92544dSBjoern A. Zeeb struct mt76_wcid *wcid, struct ieee80211_sta *sta)
8566c92544dSBjoern A. Zeeb {
8576c92544dSBjoern A. Zeeb struct mt76_tx_info tx_info = {
8586c92544dSBjoern A. Zeeb .skb = skb,
8596c92544dSBjoern A. Zeeb };
860*8ba4d145SBjoern A. Zeeb struct mt76_dev *dev = phy->dev;
8616c92544dSBjoern A. Zeeb u16 idx = q->head;
8626c92544dSBjoern A. Zeeb int err;
8636c92544dSBjoern A. Zeeb
8646c92544dSBjoern A. Zeeb if (q->queued == q->ndesc)
8656c92544dSBjoern A. Zeeb return -ENOSPC;
8666c92544dSBjoern A. Zeeb
8676c92544dSBjoern A. Zeeb skb->prev = skb->next = NULL;
8686c92544dSBjoern A. Zeeb err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
8696c92544dSBjoern A. Zeeb if (err < 0)
8706c92544dSBjoern A. Zeeb return err;
8716c92544dSBjoern A. Zeeb
8726c92544dSBjoern A. Zeeb err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
8736c92544dSBjoern A. Zeeb if (err < 0)
8746c92544dSBjoern A. Zeeb return err;
8756c92544dSBjoern A. Zeeb
876*8ba4d145SBjoern A. Zeeb mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
877*8ba4d145SBjoern A. Zeeb mt76u_complete_tx, &q->entry[idx]);
8786c92544dSBjoern A. Zeeb
8796c92544dSBjoern A. Zeeb q->head = (q->head + 1) % q->ndesc;
8806c92544dSBjoern A. Zeeb q->entry[idx].skb = tx_info.skb;
8816c92544dSBjoern A. Zeeb q->entry[idx].wcid = 0xffff;
8826c92544dSBjoern A. Zeeb q->queued++;
8836c92544dSBjoern A. Zeeb
8846c92544dSBjoern A. Zeeb return idx;
8856c92544dSBjoern A. Zeeb }
8866c92544dSBjoern A. Zeeb
mt76u_tx_kick(struct mt76_dev * dev,struct mt76_queue * q)8876c92544dSBjoern A. Zeeb static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
8886c92544dSBjoern A. Zeeb {
8896c92544dSBjoern A. Zeeb struct urb *urb;
8906c92544dSBjoern A. Zeeb int err;
8916c92544dSBjoern A. Zeeb
8926c92544dSBjoern A. Zeeb while (q->first != q->head) {
8936c92544dSBjoern A. Zeeb urb = q->entry[q->first].urb;
8946c92544dSBjoern A. Zeeb
8956c92544dSBjoern A. Zeeb trace_submit_urb(dev, urb);
8966c92544dSBjoern A. Zeeb err = usb_submit_urb(urb, GFP_ATOMIC);
8976c92544dSBjoern A. Zeeb if (err < 0) {
8986c92544dSBjoern A. Zeeb if (err == -ENODEV)
8996c92544dSBjoern A. Zeeb set_bit(MT76_REMOVED, &dev->phy.state);
9006c92544dSBjoern A. Zeeb else
9016c92544dSBjoern A. Zeeb dev_err(dev->dev, "tx urb submit failed:%d\n",
9026c92544dSBjoern A. Zeeb err);
9036c92544dSBjoern A. Zeeb break;
9046c92544dSBjoern A. Zeeb }
9056c92544dSBjoern A. Zeeb q->first = (q->first + 1) % q->ndesc;
9066c92544dSBjoern A. Zeeb }
9076c92544dSBjoern A. Zeeb }
9086c92544dSBjoern A. Zeeb
909*8ba4d145SBjoern A. Zeeb static void
mt76u_ac_to_hwq(struct mt76_dev * dev,struct mt76_queue * q,u8 qid)910*8ba4d145SBjoern A. Zeeb mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
9116c92544dSBjoern A. Zeeb {
912*8ba4d145SBjoern A. Zeeb u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;
913*8ba4d145SBjoern A. Zeeb
914*8ba4d145SBjoern A. Zeeb switch (mt76_chip(dev)) {
915*8ba4d145SBjoern A. Zeeb case 0x7663: {
9166c92544dSBjoern A. Zeeb static const u8 lmac_queue_map[] = {
9176c92544dSBjoern A. Zeeb /* ac to lmac mapping */
9186c92544dSBjoern A. Zeeb [IEEE80211_AC_BK] = 0,
9196c92544dSBjoern A. Zeeb [IEEE80211_AC_BE] = 1,
9206c92544dSBjoern A. Zeeb [IEEE80211_AC_VI] = 2,
9216c92544dSBjoern A. Zeeb [IEEE80211_AC_VO] = 4,
9226c92544dSBjoern A. Zeeb };
9236c92544dSBjoern A. Zeeb
924*8ba4d145SBjoern A. Zeeb q->hw_idx = lmac_queue_map[ac];
925*8ba4d145SBjoern A. Zeeb q->ep = q->hw_idx + 1;
926*8ba4d145SBjoern A. Zeeb break;
9276c92544dSBjoern A. Zeeb }
928*8ba4d145SBjoern A. Zeeb case 0x7961:
929*8ba4d145SBjoern A. Zeeb case 0x7925:
930*8ba4d145SBjoern A. Zeeb q->hw_idx = mt76_ac_to_hwq(ac);
931*8ba4d145SBjoern A. Zeeb q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
932*8ba4d145SBjoern A. Zeeb break;
933*8ba4d145SBjoern A. Zeeb default:
934*8ba4d145SBjoern A. Zeeb q->hw_idx = mt76_ac_to_hwq(ac);
935*8ba4d145SBjoern A. Zeeb q->ep = q->hw_idx + 1;
936*8ba4d145SBjoern A. Zeeb break;
937*8ba4d145SBjoern A. Zeeb }
9386c92544dSBjoern A. Zeeb }
9396c92544dSBjoern A. Zeeb
mt76u_alloc_tx(struct mt76_dev * dev)9406c92544dSBjoern A. Zeeb static int mt76u_alloc_tx(struct mt76_dev *dev)
9416c92544dSBjoern A. Zeeb {
942*8ba4d145SBjoern A. Zeeb int i;
9436c92544dSBjoern A. Zeeb
9446c92544dSBjoern A. Zeeb for (i = 0; i <= MT_TXQ_PSD; i++) {
945*8ba4d145SBjoern A. Zeeb struct mt76_queue *q;
946*8ba4d145SBjoern A. Zeeb int j, err;
9476c92544dSBjoern A. Zeeb
9486c92544dSBjoern A. Zeeb q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
9496c92544dSBjoern A. Zeeb if (!q)
9506c92544dSBjoern A. Zeeb return -ENOMEM;
9516c92544dSBjoern A. Zeeb
9526c92544dSBjoern A. Zeeb spin_lock_init(&q->lock);
953*8ba4d145SBjoern A. Zeeb mt76u_ac_to_hwq(dev, q, i);
9546c92544dSBjoern A. Zeeb dev->phy.q_tx[i] = q;
9556c92544dSBjoern A. Zeeb
9566c92544dSBjoern A. Zeeb q->entry = devm_kcalloc(dev->dev,
9576c92544dSBjoern A. Zeeb MT_NUM_TX_ENTRIES, sizeof(*q->entry),
9586c92544dSBjoern A. Zeeb GFP_KERNEL);
9596c92544dSBjoern A. Zeeb if (!q->entry)
9606c92544dSBjoern A. Zeeb return -ENOMEM;
9616c92544dSBjoern A. Zeeb
9626c92544dSBjoern A. Zeeb q->ndesc = MT_NUM_TX_ENTRIES;
9636c92544dSBjoern A. Zeeb for (j = 0; j < q->ndesc; j++) {
9646c92544dSBjoern A. Zeeb err = mt76u_urb_alloc(dev, &q->entry[j],
9656c92544dSBjoern A. Zeeb MT_TX_SG_MAX_SIZE);
9666c92544dSBjoern A. Zeeb if (err < 0)
9676c92544dSBjoern A. Zeeb return err;
9686c92544dSBjoern A. Zeeb }
9696c92544dSBjoern A. Zeeb }
9706c92544dSBjoern A. Zeeb return 0;
9716c92544dSBjoern A. Zeeb }
9726c92544dSBjoern A. Zeeb
mt76u_free_tx(struct mt76_dev * dev)9736c92544dSBjoern A. Zeeb static void mt76u_free_tx(struct mt76_dev *dev)
9746c92544dSBjoern A. Zeeb {
9756c92544dSBjoern A. Zeeb int i;
9766c92544dSBjoern A. Zeeb
9776c92544dSBjoern A. Zeeb mt76_worker_teardown(&dev->usb.status_worker);
9786c92544dSBjoern A. Zeeb
979*8ba4d145SBjoern A. Zeeb for (i = 0; i <= MT_TXQ_PSD; i++) {
9806c92544dSBjoern A. Zeeb struct mt76_queue *q;
9816c92544dSBjoern A. Zeeb int j;
9826c92544dSBjoern A. Zeeb
9836c92544dSBjoern A. Zeeb q = dev->phy.q_tx[i];
9846c92544dSBjoern A. Zeeb if (!q)
9856c92544dSBjoern A. Zeeb continue;
9866c92544dSBjoern A. Zeeb
9876c92544dSBjoern A. Zeeb for (j = 0; j < q->ndesc; j++) {
9886c92544dSBjoern A. Zeeb usb_free_urb(q->entry[j].urb);
9896c92544dSBjoern A. Zeeb q->entry[j].urb = NULL;
9906c92544dSBjoern A. Zeeb }
9916c92544dSBjoern A. Zeeb }
9926c92544dSBjoern A. Zeeb }
9936c92544dSBjoern A. Zeeb
/* Stop tx activity: wait for all pending frames to drain; on timeout,
 * forcibly kill the urbs and complete the stuck entries by hand.
 * Also cancels the tx-status work and flushes the status tracking.
 */
void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	/* Park the status worker so it cannot race with manual cleanup. */
	mt76_worker_disable(&dev->usb.status_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		/* Cancel every in-flight urb on all tx queues. */
		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, cleanup those skb's manually.
		 */
		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->usb.status_worker);

	/* Flush any remaining tx status entries unconditionally. */
	mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);
10466c92544dSBjoern A. Zeeb
/* Tear down all USB queues: quiesce rx and tx first, then release
 * their resources (order matters — urbs must be stopped before free).
 */
void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
10566c92544dSBjoern A. Zeeb
mt76u_alloc_queues(struct mt76_dev * dev)10576c92544dSBjoern A. Zeeb int mt76u_alloc_queues(struct mt76_dev *dev)
10586c92544dSBjoern A. Zeeb {
10596c92544dSBjoern A. Zeeb int err;
10606c92544dSBjoern A. Zeeb
10616c92544dSBjoern A. Zeeb err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
10626c92544dSBjoern A. Zeeb if (err < 0)
10636c92544dSBjoern A. Zeeb return err;
10646c92544dSBjoern A. Zeeb
10656c92544dSBjoern A. Zeeb return mt76u_alloc_tx(dev);
10666c92544dSBjoern A. Zeeb }
10676c92544dSBjoern A. Zeeb EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
10686c92544dSBjoern A. Zeeb
/* Queue ops for the USB transport; installed in __mt76u_init(). */
static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};
10736c92544dSBjoern A. Zeeb
__mt76u_init(struct mt76_dev * dev,struct usb_interface * intf,struct mt76_bus_ops * ops)10746c92544dSBjoern A. Zeeb int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
10756c92544dSBjoern A. Zeeb struct mt76_bus_ops *ops)
10766c92544dSBjoern A. Zeeb {
10776c92544dSBjoern A. Zeeb struct usb_device *udev = interface_to_usbdev(intf);
10786c92544dSBjoern A. Zeeb struct mt76_usb *usb = &dev->usb;
10796c92544dSBjoern A. Zeeb int err;
10806c92544dSBjoern A. Zeeb
10816c92544dSBjoern A. Zeeb INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
10826c92544dSBjoern A. Zeeb
10836c92544dSBjoern A. Zeeb usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
10846c92544dSBjoern A. Zeeb if (usb->data_len < 32)
10856c92544dSBjoern A. Zeeb usb->data_len = 32;
10866c92544dSBjoern A. Zeeb
10876c92544dSBjoern A. Zeeb usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
10886c92544dSBjoern A. Zeeb if (!usb->data)
10896c92544dSBjoern A. Zeeb return -ENOMEM;
10906c92544dSBjoern A. Zeeb
10916c92544dSBjoern A. Zeeb mutex_init(&usb->usb_ctrl_mtx);
10926c92544dSBjoern A. Zeeb dev->bus = ops;
10936c92544dSBjoern A. Zeeb dev->queue_ops = &usb_queue_ops;
10946c92544dSBjoern A. Zeeb
10956c92544dSBjoern A. Zeeb dev_set_drvdata(&udev->dev, dev);
10966c92544dSBjoern A. Zeeb
10976c92544dSBjoern A. Zeeb usb->sg_en = mt76u_check_sg(dev);
10986c92544dSBjoern A. Zeeb
10996c92544dSBjoern A. Zeeb err = mt76u_set_endpoints(intf, usb);
11006c92544dSBjoern A. Zeeb if (err < 0)
11016c92544dSBjoern A. Zeeb return err;
11026c92544dSBjoern A. Zeeb
11036c92544dSBjoern A. Zeeb err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
11046c92544dSBjoern A. Zeeb "usb-rx");
11056c92544dSBjoern A. Zeeb if (err)
11066c92544dSBjoern A. Zeeb return err;
11076c92544dSBjoern A. Zeeb
11086c92544dSBjoern A. Zeeb err = mt76_worker_setup(dev->hw, &usb->status_worker,
11096c92544dSBjoern A. Zeeb mt76u_status_worker, "usb-status");
11106c92544dSBjoern A. Zeeb if (err)
11116c92544dSBjoern A. Zeeb return err;
11126c92544dSBjoern A. Zeeb
11136c92544dSBjoern A. Zeeb sched_set_fifo_low(usb->rx_worker.task);
11146c92544dSBjoern A. Zeeb sched_set_fifo_low(usb->status_worker.task);
11156c92544dSBjoern A. Zeeb
11166c92544dSBjoern A. Zeeb return 0;
11176c92544dSBjoern A. Zeeb }
11186c92544dSBjoern A. Zeeb EXPORT_SYMBOL_GPL(__mt76u_init);
11196c92544dSBjoern A. Zeeb
/* Default-init entry point: wires the standard USB register accessors
 * into __mt76u_init(). Drivers with custom accessors call
 * __mt76u_init() directly instead.
 */
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
{
	static struct mt76_bus_ops bus_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.read_copy = mt76u_read_copy,
		.write_copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};

	return __mt76u_init(dev, intf, &bus_ops);
}
EXPORT_SYMBOL_GPL(mt76u_init);
11366c92544dSBjoern A. Zeeb
11376c92544dSBjoern A. Zeeb MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
1138*8ba4d145SBjoern A. Zeeb MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
11396c92544dSBjoern A. Zeeb MODULE_LICENSE("Dual BSD/GPL");
1140