// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include "main.h"
#include "debug.h"
#include "mac.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "usb.h"

static bool rtw_switch_usb_mode = true;
module_param_named(switch_usb_mode, rtw_switch_usb_mode, bool, 0644);
MODULE_PARM_DESC(switch_usb_mode,
		 "Set to N to disable switching to USB 3 mode to avoid potential interference in the 2.4 GHz band (default: Y)");

#define RTW_USB_MAX_RXQ_LEN	512

struct rtw_usb_txcb {
	struct rtw_dev *rtwdev;
	struct sk_buff_head tx_ack_queue;
};

static void rtw_usb_fill_tx_checksum(struct rtw_usb *rtwusb,
				     struct sk_buff *skb, int agg_num)
{
	struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)skb->data;
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	struct rtw_tx_pkt_info pkt_info;

	le32p_replace_bits(&tx_desc->w7, agg_num, RTW_TX_DESC_W7_DMA_TXAGG_NUM);
	pkt_info.pkt_offset = le32_get_bits(tx_desc->w1, RTW_TX_DESC_W1_PKT_OFFSET);
	rtw_tx_fill_txdesc_checksum(rtwdev, &pkt_info, skb->data);
}

static void rtw_usb_reg_sec(struct rtw_dev *rtwdev, u32 addr, __le32 *data)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *udev = rtwusb->udev;
	bool reg_on_section = false;
	u16 t_reg = 0x4e0;
	u8 t_len = 1;
	int status;

	/* There are three sections:
	 * 1. on (0x00~0xFF; 0x1000~0x10FF): this section is always powered on
	 * 2. off (< 0xFE00, excluding "on" section): this section could be
	 *    powered off
	 * 3. local (>= 0xFE00): usb specific registers section
	 */
	if (addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff))
		reg_on_section = true;

	if (!reg_on_section)
		return;

	status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
				 t_reg, 0, data, t_len, 500);

	if (status != t_len && status != -ENODEV)
		rtw_err(rtwdev, "%s: reg 0x%x, usb write %u fail, status: %d\n",
			__func__, t_reg, t_len, status);
}

static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *udev = rtwusb->udev;
	__le32 *data;
	unsigned long flags;
	int idx, ret;
	static int count;

	spin_lock_irqsave(&rtwusb->usb_lock, flags);

	idx = rtwusb->usb_data_index;
	rtwusb->usb_data_index = (idx + 1) & (RTW_USB_MAX_RXTX_COUNT - 1);

	spin_unlock_irqrestore(&rtwusb->usb_lock, flags);

	data = &rtwusb->usb_data[idx];

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      RTW_USB_CMD_REQ, RTW_USB_CMD_READ, addr,
			      RTW_USB_VENQT_CMD_IDX, data, len, 1000);
	if (ret < 0 && ret != -ENODEV && count++ < 4)
		rtw_err(rtwdev, "read register 0x%x failed with %d\n",
			addr, ret);

	if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
	    rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_usb_reg_sec(rtwdev, addr, data);

	return le32_to_cpu(*data);
}

static u8 rtw_usb_read8(struct rtw_dev *rtwdev, u32 addr)
{
	return (u8)rtw_usb_read(rtwdev, addr, 1);
}

static u16 rtw_usb_read16(struct rtw_dev *rtwdev, u32 addr)
{
	return (u16)rtw_usb_read(rtwdev, addr, 2);
}

static u32 rtw_usb_read32(struct rtw_dev *rtwdev, u32 addr)
{
	return (u32)rtw_usb_read(rtwdev, addr, 4);
}

static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
{
	struct rtw_usb *rtwusb = (struct rtw_usb *)rtwdev->priv;
	struct usb_device *udev = rtwusb->udev;
	unsigned long flags;
	__le32 *data;
	int idx, ret;
	static int count;

	spin_lock_irqsave(&rtwusb->usb_lock, flags);

	idx = rtwusb->usb_data_index;
	rtwusb->usb_data_index = (idx + 1) & (RTW_USB_MAX_RXTX_COUNT - 1);

	spin_unlock_irqrestore(&rtwusb->usb_lock, flags);

	data = &rtwusb->usb_data[idx];

	*data = cpu_to_le32(val);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
			      addr, 0, data, len, 500);
	if (ret < 0 && ret != -ENODEV && count++ < 4)
		rtw_err(rtwdev, "write register 0x%x failed with %d\n",
			addr, ret);

	if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
	    rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_usb_reg_sec(rtwdev, addr, data);
}

static void rtw_usb_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	rtw_usb_write(rtwdev, addr, val, 1);
}

static void rtw_usb_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	rtw_usb_write(rtwdev, addr, val, 2);
}

static void rtw_usb_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	rtw_usb_write(rtwdev, addr, val, 4);
}

static void rtw_usb_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
					const u8 *data, u32 size)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *udev = rtwusb->udev;
	u32 addr = FW_START_ADDR_LEGACY;
	u8 *data_dup, *buf;
	u32 n, block_size;
	int ret;

	switch (rtwdev->chip->id) {
	case RTW_CHIP_TYPE_8723D:
		block_size = 254;
		break;
	default:
		block_size = 196;
		break;
	}

	data_dup = kmemdup(data, size, GFP_KERNEL);
	if (!data_dup)
		return;

	buf = data_dup;

	rtw_write32_mask(rtwdev, REG_MCUFW_CTRL, BIT_ROM_PGE, page);

	while (size > 0) {
		if (size >= block_size)
			n = block_size;
		else if (size >= 8)
			n = 8;
		else
			n = 1;

		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				      RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
				      addr, 0, buf, n, 500);
		if (ret != n) {
			if (ret != -ENODEV)
				rtw_err(rtwdev,
					"write 0x%x len %d failed: %d\n",
					addr, n, ret);
			break;
		}

		addr += n;
		buf += n;
		size -= n;
	}

	kfree(data_dup);
}

static int dma_mapping_to_ep(enum rtw_dma_mapping dma_mapping)
{
	switch (dma_mapping) {
	case RTW_DMA_MAPPING_HIGH:
		return 0;
	case RTW_DMA_MAPPING_NORMAL:
		return 1;
	case RTW_DMA_MAPPING_LOW:
		return 2;
	case RTW_DMA_MAPPING_EXTRA:
		return 3;
	default:
		return -EINVAL;
	}
}

static int rtw_usb_parse(struct rtw_dev *rtwdev,
			 struct usb_interface *interface)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_host_interface *host_interface = &interface->altsetting[0];
	struct usb_interface_descriptor *interface_desc = &host_interface->desc;
	struct usb_endpoint_descriptor *endpoint;
	int num_out_pipes = 0;
	int i;
	u8 num;
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn;

	for (i = 0; i < interface_desc->bNumEndpoints; i++) {
		endpoint = &host_interface->endpoint[i].desc;
		num = usb_endpoint_num(endpoint);

		if (usb_endpoint_dir_in(endpoint) &&
		    usb_endpoint_xfer_bulk(endpoint)) {
			if (rtwusb->pipe_in) {
				rtw_err(rtwdev, "IN pipes overflow\n");
				return -EINVAL;
			}

			rtwusb->pipe_in = num;
		}

		if (usb_endpoint_dir_in(endpoint) &&
		    usb_endpoint_xfer_int(endpoint)) {
			if (rtwusb->pipe_interrupt) {
				rtw_err(rtwdev, "INT pipes overflow\n");
				return -EINVAL;
			}

			rtwusb->pipe_interrupt = num;
		}

		if (usb_endpoint_dir_out(endpoint) &&
		    usb_endpoint_xfer_bulk(endpoint)) {
			if (num_out_pipes >= ARRAY_SIZE(rtwusb->out_ep)) {
				rtw_err(rtwdev, "OUT pipes overflow\n");
				return -EINVAL;
			}

			rtwusb->out_ep[num_out_pipes++] = num;
		}
	}

	rtwdev->hci.bulkout_num = num_out_pipes;

	if (num_out_pipes < 1 || num_out_pipes > 4) {
		rtw_err(rtwdev, "invalid number of endpoints %d\n", num_out_pipes);
		return -EINVAL;
	}

	rqpn = &chip->rqpn_table[num_out_pipes];

	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID0] = dma_mapping_to_ep(rqpn->dma_map_be);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID1] = dma_mapping_to_ep(rqpn->dma_map_bk);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID2] = dma_mapping_to_ep(rqpn->dma_map_bk);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID3] = dma_mapping_to_ep(rqpn->dma_map_be);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID4] = dma_mapping_to_ep(rqpn->dma_map_vi);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID5] = dma_mapping_to_ep(rqpn->dma_map_vi);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID6] = dma_mapping_to_ep(rqpn->dma_map_vo);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID7] = dma_mapping_to_ep(rqpn->dma_map_vo);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID8] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID9] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID10] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID11] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID12] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID13] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID14] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID15] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_BEACON] = dma_mapping_to_ep(rqpn->dma_map_hi);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_HIGH] = dma_mapping_to_ep(rqpn->dma_map_hi);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_MGMT] = dma_mapping_to_ep(rqpn->dma_map_mg);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_H2C] = dma_mapping_to_ep(rqpn->dma_map_hi);

	return 0;
}

static void rtw_usb_write_port_tx_complete(struct urb *urb)
{
	struct rtw_usb_txcb *txcb = urb->context;
	struct rtw_dev *rtwdev = txcb->rtwdev;
	struct ieee80211_hw *hw = rtwdev->hw;

	while (true) {
		struct sk_buff *skb = skb_dequeue(&txcb->tx_ack_queue);
		struct ieee80211_tx_info *info;
		struct rtw_usb_tx_data *tx_data;

		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		tx_data = rtw_usb_get_tx_data(skb);

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		ieee80211_tx_info_clear_status(info);
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_irqsafe(hw, skb);
	}

	kfree(txcb);
}

static int qsel_to_ep(struct rtw_usb *rtwusb, unsigned int qsel)
{
	if (qsel >= ARRAY_SIZE(rtwusb->qsel_to_ep))
		return -EINVAL;

	return rtwusb->qsel_to_ep[qsel];
}

static int rtw_usb_write_port(struct rtw_dev *rtwdev, u8 qsel, struct sk_buff *skb,
			      usb_complete_t cb, void *context)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *usbd = rtwusb->udev;
	struct urb *urb;
	unsigned int pipe;
	int ret;
	int ep = qsel_to_ep(rtwusb, qsel);

	if (ep < 0)
		return ep;

	pipe = usb_sndbulkpipe(usbd, rtwusb->out_ep[ep]);
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, usbd, pipe, skb->data, skb->len, cb, context);
	urb->transfer_flags |= URB_ZERO_PACKET;
	ret = usb_submit_urb(urb, GFP_ATOMIC);

	usb_free_urb(urb);

	return ret;
}

static bool rtw_usb_tx_agg_skb(struct rtw_usb *rtwusb, struct sk_buff_head *list)
{
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	struct rtw_tx_desc *tx_desc;
	struct rtw_usb_txcb *txcb;
	struct sk_buff *skb_head;
	struct sk_buff *skb_iter;
	int agg_num = 0;
	unsigned int align_next = 0;
	u8 qsel;

	if (skb_queue_empty(list))
		return false;

	txcb = kmalloc(sizeof(*txcb), GFP_ATOMIC);
	if (!txcb)
		return false;

	txcb->rtwdev = rtwdev;
	skb_queue_head_init(&txcb->tx_ack_queue);

	skb_iter = skb_dequeue(list);

	if (skb_queue_empty(list)) {
		skb_head = skb_iter;
		goto queue;
	}

	skb_head = dev_alloc_skb(RTW_USB_MAX_XMITBUF_SZ);
	if (!skb_head) {
		skb_head = skb_iter;
		goto queue;
	}

	while (skb_iter) {
		unsigned long flags;

		skb_put(skb_head, align_next);
		skb_put_data(skb_head, skb_iter->data, skb_iter->len);

		align_next = ALIGN(skb_iter->len, 8) - skb_iter->len;

		agg_num++;

		skb_queue_tail(&txcb->tx_ack_queue, skb_iter);

		spin_lock_irqsave(&list->lock, flags);

		skb_iter = skb_peek(list);

		if (skb_iter &&
		    skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ &&
		    agg_num < rtwdev->chip->usb_tx_agg_desc_num)
			__skb_unlink(skb_iter, list);
		else
			skb_iter = NULL;
		spin_unlock_irqrestore(&list->lock, flags);
	}

	if (agg_num > 1)
		rtw_usb_fill_tx_checksum(rtwusb, skb_head, agg_num);

queue:
	skb_queue_tail(&txcb->tx_ack_queue, skb_head);
	tx_desc = (struct rtw_tx_desc *)skb_head->data;
	qsel = le32_get_bits(tx_desc->w1, RTW_TX_DESC_W1_QSEL);

	rtw_usb_write_port(rtwdev, qsel, skb_head, rtw_usb_write_port_tx_complete, txcb);

	return true;
}

static void rtw_usb_tx_handler(struct work_struct *work)
{
	struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, tx_work);
	int i, limit;

	for (i = ARRAY_SIZE(rtwusb->tx_queue) - 1; i >= 0; i--) {
		for (limit = 0; limit < 200; limit++) {
			struct sk_buff_head *list = &rtwusb->tx_queue[i];

			if (!rtw_usb_tx_agg_skb(rtwusb, list))
				break;
		}
	}
}

static void rtw_usb_tx_queue_purge(struct rtw_usb *rtwusb)
{
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
		ieee80211_purge_tx_queue(rtwdev->hw, &rtwusb->tx_queue[i]);
}

static void rtw_usb_write_port_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;

	dev_kfree_skb_any(skb);
}

static int rtw_usb_write_data(struct rtw_dev *rtwdev,
			      struct rtw_tx_pkt_info *pkt_info,
			      u8 *buf)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	unsigned int size;
	u8 qsel;
	int ret = 0;

	size = pkt_info->tx_pkt_size;
	qsel = pkt_info->qsel;

	skb = dev_alloc_skb(chip->tx_pkt_desc_sz + size);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	skb_push(skb, chip->tx_pkt_desc_sz);
	memset(skb->data, 0, chip->tx_pkt_desc_sz);
	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);

	ret = rtw_usb_write_port(rtwdev, qsel, skb,
				 rtw_usb_write_port_complete, skb);
	if (unlikely(ret))
		rtw_err(rtwdev, "failed to do USB write, ret=%d\n", ret);

	return ret;
}

static int rtw_usb_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_tx_pkt_info pkt_info = {0};

	pkt_info.tx_pkt_size = size;
	pkt_info.qsel = TX_DESC_QSEL_BEACON;
	pkt_info.offset = chip->tx_pkt_desc_sz;
	pkt_info.ls = true;

	return rtw_usb_write_data(rtwdev, &pkt_info, buf);
}

static int rtw_usb_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct rtw_tx_pkt_info pkt_info = {0};

	pkt_info.tx_pkt_size = size;
	pkt_info.qsel = TX_DESC_QSEL_H2C;

	return rtw_usb_write_data(rtwdev, &pkt_info, buf);
}

static u8 rtw_usb_tx_queue_mapping_to_qsel(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 qsel;

	if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		qsel = TX_DESC_QSEL_MGMT;
	else if (is_broadcast_ether_addr(hdr->addr1) ||
		 is_multicast_ether_addr(hdr->addr1))
		qsel = TX_DESC_QSEL_HIGH;
	else if (skb_get_queue_mapping(skb) <= IEEE80211_AC_BK)
		qsel = skb->priority;
	else
		qsel = TX_DESC_QSEL_BEACON;

	return qsel;
}

static int rtw_usb_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_usb_tx_data *tx_data;
	u8 *pkt_desc;
	int ep;

	pkt_info->qsel = rtw_usb_tx_queue_mapping_to_qsel(skb);
	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
	ep = qsel_to_ep(rtwusb, pkt_info->qsel);
	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);
	tx_data = rtw_usb_get_tx_data(skb);
	tx_data->sn = pkt_info->sn;

	skb_queue_tail(&rtwusb->tx_queue[ep], skb);

	return 0;
}

static void rtw_usb_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);

	queue_work(rtwusb->txwq, &rtwusb->tx_work);
}

static void rtw_usb_rx_handler(struct work_struct *work)
{
	struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_work);
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	struct ieee80211_rx_status rx_status;
	struct rtw_rx_pkt_stat pkt_stat;
	struct sk_buff *rx_skb;
	struct sk_buff *skb;
	u32 pkt_desc_sz = rtwdev->chip->rx_pkt_desc_sz;
	u32 max_skb_len = pkt_desc_sz + PHY_STATUS_SIZE * 8 +
			  IEEE80211_MAX_MPDU_LEN_VHT_11454;
	u32 pkt_offset, next_pkt, skb_len;
	u8 *rx_desc;
	int limit;

	for (limit = 0; limit < 200; limit++) {
		rx_skb = skb_dequeue(&rtwusb->rx_queue);
		if (!rx_skb)
			break;

		if (skb_queue_len(&rtwusb->rx_queue) >= RTW_USB_MAX_RXQ_LEN) {
			dev_dbg_ratelimited(rtwdev->dev, "failed to get rx_queue, overflow\n");
			dev_kfree_skb_any(rx_skb);
			continue;
		}

		rx_desc = rx_skb->data;

		do {
			rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat,
					     &rx_status);
			pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
				     pkt_stat.shift;

			skb_len = pkt_stat.pkt_len + pkt_offset;
			if (skb_len > max_skb_len) {
				rtw_dbg(rtwdev, RTW_DBG_USB,
					"skipping too big packet: %u\n",
					skb_len);
				goto skip_packet;
			}

			skb = alloc_skb(skb_len, GFP_ATOMIC);
			if (!skb) {
				rtw_dbg(rtwdev, RTW_DBG_USB,
					"failed to allocate RX skb of size %u\n",
					skb_len);
				goto skip_packet;
			}

			skb_put_data(skb, rx_desc, skb_len);

			if (pkt_stat.is_c2h) {
				rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
			} else {
				skb_pull(skb, pkt_offset);
				rtw_update_rx_freq_for_invalid(rtwdev, skb,
							       &rx_status,
							       &pkt_stat);
				rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
				memcpy(skb->cb, &rx_status, sizeof(rx_status));
				ieee80211_rx_irqsafe(rtwdev->hw, skb);
			}

skip_packet:
			next_pkt = round_up(skb_len, 8);
			rx_desc += next_pkt;
		} while (rx_desc + pkt_desc_sz < rx_skb->data + rx_skb->len);

		if (skb_queue_len(&rtwusb->rx_free_queue) >= RTW_USB_RX_SKB_NUM)
			dev_kfree_skb_any(rx_skb);
		else
			skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
	}
}

static void rtw_usb_read_port_complete(struct urb *urb);

static void rtw_usb_rx_resubmit(struct rtw_usb *rtwusb,
				struct rx_usb_ctrl_block *rxcb,
				gfp_t gfp)
{
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	struct sk_buff *rx_skb;
	int error;

	rx_skb = skb_dequeue(&rtwusb->rx_free_queue);
	if (!rx_skb)
		rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, gfp);

	if (!rx_skb)
		goto try_later;

	skb_reset_tail_pointer(rx_skb);
	rx_skb->len = 0;

	rxcb->rx_skb = rx_skb;

	usb_fill_bulk_urb(rxcb->rx_urb, rtwusb->udev,
			  usb_rcvbulkpipe(rtwusb->udev, rtwusb->pipe_in),
			  rxcb->rx_skb->data, RTW_USB_MAX_RECVBUF_SZ,
			  rtw_usb_read_port_complete, rxcb);

	error = usb_submit_urb(rxcb->rx_urb, gfp);
	if (error) {
		skb_queue_tail(&rtwusb->rx_free_queue, rxcb->rx_skb);

		if (error != -ENODEV)
			rtw_err(rtwdev, "Err sending rx data urb %d\n",
				error);

		if (error == -ENOMEM)
			goto try_later;
	}

	return;

try_later:
	rxcb->rx_skb = NULL;
	queue_work(rtwusb->rxwq, &rtwusb->rx_urb_work);
}

static void rtw_usb_rx_resubmit_work(struct work_struct *work)
{
	struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_urb_work);
	struct rx_usb_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];

		if (!rxcb->rx_skb)
			rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
	}
}

static void rtw_usb_read_port_complete(struct urb *urb)
{
	struct rx_usb_ctrl_block *rxcb = urb->context;
	struct rtw_dev *rtwdev = rxcb->rtwdev;
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct sk_buff *skb = rxcb->rx_skb;

	if (urb->status == 0) {
		if (urb->actual_length >= RTW_USB_MAX_RECVBUF_SZ ||
		    urb->actual_length < 24) {
			rtw_err(rtwdev, "failed to get urb length:%d\n",
				urb->actual_length);
			skb_queue_tail(&rtwusb->rx_free_queue, skb);
		} else {
			skb_put(skb, urb->actual_length);
			skb_queue_tail(&rtwusb->rx_queue, skb);
			queue_work(rtwusb->rxwq, &rtwusb->rx_work);
		}
		rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
	} else {
		skb_queue_tail(&rtwusb->rx_free_queue, skb);

		switch (urb->status) {
		case -EINVAL:
		case -EPIPE:
		case -ENODEV:
		case -ESHUTDOWN:
		case -ENOENT:
		case -EPROTO:
		case -EILSEQ:
		case -ETIME:
		case -ECOMM:
		case -EOVERFLOW:
		case -EINPROGRESS:
			break;
		default:
			rtw_err(rtwdev, "status %d\n", urb->status);
			break;
		}
	}
}

static void rtw_usb_cancel_rx_bufs(struct rtw_usb *rtwusb)
{
	struct rx_usb_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];
		usb_kill_urb(rxcb->rx_urb);
	}
}

static void rtw_usb_free_rx_bufs(struct rtw_usb *rtwusb)
{
	struct rx_usb_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];
		usb_kill_urb(rxcb->rx_urb);
		usb_free_urb(rxcb->rx_urb);
	}
}

static int rtw_usb_alloc_rx_bufs(struct rtw_usb *rtwusb)
{
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];

		rxcb->rtwdev = rtwusb->rtwdev;
		rxcb->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rxcb->rx_urb)
			goto err;
	}

	return 0;
err:
	rtw_usb_free_rx_bufs(rtwusb);
	return -ENOMEM;
}

static int rtw_usb_setup(struct rtw_dev *rtwdev)
{
	/* empty function for rtw_hci_ops */
	return 0;
}

static int rtw_usb_start(struct rtw_dev *rtwdev)
{
	return 0;
}

static void rtw_usb_stop(struct rtw_dev *rtwdev)
{
}

static void rtw_usb_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	/* empty function for rtw_hci_ops */
}

static void rtw_usb_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	/* empty function for rtw_hci_ops */
}

static void rtw_usb_init_burst_pkt_len(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	enum usb_device_speed speed = rtwusb->udev->speed;
	u8 rxdma, burst_size;

	rxdma = BIT_DMA_BURST_CNT | BIT_DMA_MODE;

	if (speed == USB_SPEED_SUPER)
		burst_size = BIT_DMA_BURST_SIZE_1024;
	else if (speed == USB_SPEED_HIGH)
		burst_size = BIT_DMA_BURST_SIZE_512;
	else
		burst_size = BIT_DMA_BURST_SIZE_64;

	u8p_replace_bits(&rxdma, burst_size, BIT_DMA_BURST_SIZE);

	rtw_write8(rtwdev, REG_RXDMA_MODE, rxdma);
	rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN);
}

static void rtw_usb_interface_cfg(struct rtw_dev *rtwdev)
{
	rtw_usb_init_burst_pkt_len(rtwdev);
}

static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
{
	u8 size, timeout;
	u16 val16;

	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
	rtw_write8_clr(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));

	if (enable) {
		size = 0x5;
		timeout = 0x20;
	} else {
		size = 0x0;
		timeout = 0x1;
	}
	val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
		u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);

	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
}

static void rtw_usb_dynamic_rx_agg_v2(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	u8 size, timeout;
	u16 val16;

	if (!enable) {
		size = 0x0;
		timeout = 0x1;
	} else if (rtwusb->udev->speed == USB_SPEED_SUPER) {
		size = 0x6;
		timeout = 0x1a;
	} else {
		size = 0x5;
		timeout = 0x20;
	}

	val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
		u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);

	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
}

static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
	switch (rtwdev->chip->id) {
	case RTW_CHIP_TYPE_8822C:
	case RTW_CHIP_TYPE_8822B:
	case RTW_CHIP_TYPE_8821C:
	case RTW_CHIP_TYPE_8814A:
		rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
		break;
	case RTW_CHIP_TYPE_8821A:
	case RTW_CHIP_TYPE_8812A:
		rtw_usb_dynamic_rx_agg_v2(rtwdev, enable);
		break;
	case RTW_CHIP_TYPE_8723D:
		/* Doesn't like aggregation. */
		break;
	case RTW_CHIP_TYPE_8703B:
		/* Likely not found in USB devices. */
		break;
	}
}

static const struct rtw_hci_ops rtw_usb_ops = {
	.tx_write = rtw_usb_tx_write,
	.tx_kick_off = rtw_usb_tx_kick_off,
	.setup = rtw_usb_setup,
	.start = rtw_usb_start,
	.stop = rtw_usb_stop,
	.deep_ps = rtw_usb_deep_ps,
	.link_ps = rtw_usb_link_ps,
	.interface_cfg = rtw_usb_interface_cfg,
	.dynamic_rx_agg = rtw_usb_dynamic_rx_agg,
	.write_firmware_page = rtw_usb_write_firmware_page,

	.write8 = rtw_usb_write8,
	.write16 = rtw_usb_write16,
	.write32 = rtw_usb_write32,
	.read8 = rtw_usb_read8,
	.read16 = rtw_usb_read16,
	.read32 = rtw_usb_read32,

	.write_data_rsvd_page = rtw_usb_write_data_rsvd_page,
	.write_data_h2c = rtw_usb_write_data_h2c,
};

static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct sk_buff *rx_skb;
	int i;

	rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH, 0);
	if (!rtwusb->rxwq) {
		rtw_err(rtwdev, "failed to create RX work queue\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&rtwusb->rx_queue);
	skb_queue_head_init(&rtwusb->rx_free_queue);

	INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
	INIT_WORK(&rtwusb->rx_urb_work, rtw_usb_rx_resubmit_work);

	for (i = 0; i < RTW_USB_RX_SKB_NUM; i++) {
		rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, GFP_KERNEL);
		if (rx_skb)
			skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
	}

	return 0;
}

static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];

		rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_KERNEL);
	}
}

static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);

	skb_queue_purge(&rtwusb->rx_queue);

	destroy_workqueue(rtwusb->rxwq);

	skb_queue_purge(&rtwusb->rx_free_queue);
}

static int rtw_usb_init_tx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	int i;

	rtwusb->txwq = create_singlethread_workqueue("rtw88_usb: tx wq");
	if (!rtwusb->txwq) {
		rtw_err(rtwdev, "failed to create TX work queue\n");
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
		skb_queue_head_init(&rtwusb->tx_queue[i]);

	INIT_WORK(&rtwusb->tx_work, rtw_usb_tx_handler);

	return 0;
}

static void rtw_usb_deinit_tx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);

	destroy_workqueue(rtwusb->txwq);
	rtw_usb_tx_queue_purge(rtwusb);
}

static int rtw_usb_intf_init(struct rtw_dev *rtwdev,
			     struct usb_interface *intf)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *udev = usb_get_dev(interface_to_usbdev(intf));
	int ret;

	rtwusb->udev = udev;
	ret = rtw_usb_parse(rtwdev, intf);
	if (ret)
		return ret;

	rtwusb->usb_data = kcalloc(RTW_USB_MAX_RXTX_COUNT, sizeof(u32),
				   GFP_KERNEL);
	if (!rtwusb->usb_data)
		return -ENOMEM;

	usb_set_intfdata(intf, rtwdev->hw);

	SET_IEEE80211_DEV(rtwdev->hw, &intf->dev);
	spin_lock_init(&rtwusb->usb_lock);

	return 0;
}

static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev,
				struct usb_interface *intf)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);

	usb_put_dev(rtwusb->udev);
	kfree(rtwusb->usb_data);
	usb_set_intfdata(intf, NULL);
}

static int rtw_usb_switch_mode_old(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	enum usb_device_speed cur_speed = rtwusb->udev->speed;
	u8 hci_opt;

	if (cur_speed == USB_SPEED_HIGH) {
		hci_opt = rtw_read8(rtwdev, REG_HCI_OPT_CTRL);

		if ((hci_opt & (BIT(2) | BIT(3))) != BIT(3)) {
			rtw_write8(rtwdev, REG_HCI_OPT_CTRL, 0x8);
			rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x2);
			rtw_write8(rtwdev, REG_ACLK_MON, 0x1);
			rtw_write8(rtwdev, 0x3d, 0x3);
			/* usb disconnect */
			rtw_write8(rtwdev, REG_SYS_PW_CTRL + 1, 0x80);
			return 1;
		}
	} else if (cur_speed == USB_SPEED_SUPER) {
		rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT(1));
		rtw_write8_clr(rtwdev, REG_ACLK_MON, BIT(0));
	}

	return 0;
}

static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
{
	enum usb_device_speed cur_speed;
	u8 id = rtwdev->chip->id;
	bool can_switch;
	u32 pad_ctrl2;

	if (rtw_read8(rtwdev, REG_SYS_CFG2 + 3) == 0x20)
		cur_speed = USB_SPEED_SUPER;
	else
		cur_speed = USB_SPEED_HIGH;

	if (cur_speed == USB_SPEED_SUPER)
		return 0;

	pad_ctrl2 = rtw_read32(rtwdev, REG_PAD_CTRL2);

	can_switch = !!(pad_ctrl2 & (BIT_MASK_USB23_SW_MODE_V1 |
				     BIT_USB3_USB2_TRANSITION));

	if (!can_switch) {
		rtw_dbg(rtwdev, RTW_DBG_USB,
			"Switching to USB 3 mode unsupported by the chip\n");
		return 0;
	}

	/* At this point cur_speed is USB_SPEED_HIGH. If we already tried
	 * to switch don't try again - it's a USB 2 port.
	 */
	if (u32_get_bits(pad_ctrl2, BIT_MASK_USB23_SW_MODE_V1) == BIT_USB_MODE_U3)
		return 0;

	/* Enable IO wrapper timeout */
	if (id == RTW_CHIP_TYPE_8822B || id == RTW_CHIP_TYPE_8821C)
		rtw_write8_clr(rtwdev, REG_SW_MDIO + 3, BIT(0));

	u32p_replace_bits(&pad_ctrl2, BIT_USB_MODE_U3, BIT_MASK_USB23_SW_MODE_V1);
	pad_ctrl2 |= BIT_RSM_EN_V1;

	rtw_write32(rtwdev, REG_PAD_CTRL2, pad_ctrl2);
	rtw_write8(rtwdev, REG_PAD_CTRL2 + 1, 4);

	rtw_write16_set(rtwdev, REG_SYS_PW_CTRL, BIT_APFM_OFFMAC);
	usleep_range(1000, 1001);
	rtw_write32_set(rtwdev, REG_PAD_CTRL2, BIT_NO_PDN_CHIPOFF_V1);

	return 1;
}

static bool rtw_usb3_chip_old(u8 chip_id)
{
	return chip_id == RTW_CHIP_TYPE_8812A ||
	       chip_id == RTW_CHIP_TYPE_8814A;
}

static bool rtw_usb3_chip_new(u8 chip_id)
{
	return chip_id == RTW_CHIP_TYPE_8822C ||
	       chip_id == RTW_CHIP_TYPE_8822B;
}

static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
{
	u8 id = rtwdev->chip->id;

	if (!rtw_usb3_chip_new(id) && !rtw_usb3_chip_old(id))
		return 0;

	if (!rtwdev->efuse.usb_mode_switch) {
		rtw_dbg(rtwdev, RTW_DBG_USB,
			"Switching to USB 3 mode disabled by chip's efuse\n");
		return 0;
	}

	if (!rtw_switch_usb_mode) {
		rtw_dbg(rtwdev, RTW_DBG_USB,
			"Switching to USB 3 mode disabled by module parameter\n");
		return 0;
	}

	if (rtw_usb3_chip_old(id))
		return rtw_usb_switch_mode_old(rtwdev);
	else
		return rtw_usb_switch_mode_new(rtwdev);
}

#define USB_REG_PAGE 0xf4
#define USB_PHY_PAGE0 0x9b
#define USB_PHY_PAGE1 0xbb

static void rtw_usb_phy_write(struct rtw_dev *rtwdev, u8 addr, u16 data,
			      enum usb_device_speed speed)
{
	if (speed == USB_SPEED_SUPER) {
		rtw_write8(rtwdev, REG_USB3_PHY_DAT_L, data & 0xff);
		rtw_write8(rtwdev, REG_USB3_PHY_DAT_H, data >> 8);
		rtw_write8(rtwdev, REG_USB3_PHY_ADR, addr | BIT_USB3_PHY_ADR_WR);
	} else if (speed == USB_SPEED_HIGH) {
		rtw_write8(rtwdev, REG_USB2_PHY_DAT, data);
		rtw_write8(rtwdev, REG_USB2_PHY_ADR, addr);
		rtw_write8(rtwdev, REG_USB2_PHY_CMD, BIT_USB2_PHY_CMD_TRG);
	}
}

static void rtw_usb_page_switch(struct rtw_dev *rtwdev,
				enum usb_device_speed speed, u8 page)
{
	if (speed == USB_SPEED_SUPER)
		return;

	rtw_usb_phy_write(rtwdev, USB_REG_PAGE, page, speed);
}

static void rtw_usb_phy_cfg(struct rtw_dev *rtwdev,
			    enum usb_device_speed speed)
{
	const struct rtw_intf_phy_para *para = NULL;
	u16 offset;

	if (!rtwdev->chip->intf_table)
		return;

	if (speed == USB_SPEED_SUPER)
		para = rtwdev->chip->intf_table->usb3_para;
	else if (speed == USB_SPEED_HIGH)
		para = rtwdev->chip->intf_table->usb2_para;

	if (!para)
		return;

	for ( ; para->offset != 0xffff; para++) {
		if (!(para->cut_mask & BIT(rtwdev->hal.cut_version)))
			continue;

		offset = para->offset;

		if (para->ip_sel == RTW_IP_SEL_MAC) {
			rtw_write8(rtwdev, offset, para->value);
		} else {
			if (offset > 0x100)
				rtw_usb_page_switch(rtwdev, speed, USB_PHY_PAGE1);
			else
				rtw_usb_page_switch(rtwdev, speed, USB_PHY_PAGE0);

			offset &= 0xff;

			rtw_usb_phy_write(rtwdev, offset, para->value, speed);
		}
	}
}

int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct rtw_dev *rtwdev;
	struct ieee80211_hw *hw;
	struct rtw_usb *rtwusb;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_usb);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw)
		return -ENOMEM;

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &intf->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_info;
	rtwdev->hci.ops = &rtw_usb_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_USB;

	rtwusb = rtw_get_usb_priv(rtwdev);
	rtwusb->rtwdev = rtwdev;

	ret = rtw_usb_alloc_rx_bufs(rtwusb);
	if (ret)
		goto err_release_hw;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_free_rx_bufs;

	ret = rtw_usb_intf_init(rtwdev, intf);
	if (ret) {
		rtw_err(rtwdev, "failed to init USB interface\n");
		goto err_deinit_core;
	}

	ret = rtw_usb_init_tx(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init USB TX\n");
		goto err_destroy_usb;
	}

	ret = rtw_usb_init_rx(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init USB RX\n");
		goto err_destroy_txwq;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_rxwq;
	}

	rtw_usb_phy_cfg(rtwdev, USB_SPEED_HIGH);
	rtw_usb_phy_cfg(rtwdev, USB_SPEED_SUPER);

	ret = rtw_usb_switch_mode(rtwdev);
	if (ret) {
		/* Not a fail, but we do need to skip rtw_register_hw. */
		rtw_dbg(rtwdev, RTW_DBG_USB, "switching to USB 3 mode\n");
		ret = 0;
		goto err_destroy_rxwq;
	}

	ret = rtw_register_hw(rtwdev, rtwdev->hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_rxwq;
	}

	rtw_usb_setup_rx(rtwdev);

	return 0;

err_destroy_rxwq:
	rtw_usb_deinit_rx(rtwdev);

err_destroy_txwq:
	rtw_usb_deinit_tx(rtwdev);

err_destroy_usb:
	rtw_usb_intf_deinit(rtwdev, intf);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_free_rx_bufs:
	rtw_usb_free_rx_bufs(rtwusb);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_usb_probe);

void rtw_usb_disconnect(struct usb_interface *intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(intf);
	struct rtw_dev *rtwdev;
	struct rtw_usb *rtwusb;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwusb = rtw_get_usb_priv(rtwdev);

	rtw_usb_cancel_rx_bufs(rtwusb);

	rtw_unregister_hw(rtwdev, hw);
	rtw_usb_deinit_tx(rtwdev);
	rtw_usb_deinit_rx(rtwdev);

	if (rtwusb->udev->state != USB_STATE_NOTATTACHED)
		usb_reset_device(rtwusb->udev);

	rtw_usb_free_rx_bufs(rtwusb);

	rtw_usb_intf_deinit(rtwdev, intf);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_usb_disconnect);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek USB 802.11ac wireless driver");
MODULE_LICENSE("Dual BSD/GPL");