// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2025 Realtek Corporation
 */

#include <linux/usb.h>
#include "debug.h"
#include "mac.h"
#include "reg.h"
#include "txrx.h"
#include "usb.h"

static void rtw89_usb_read_port_complete(struct urb *urb);

static void rtw89_usb_vendorreq(struct rtw89_dev *rtwdev, u32 addr,
				void *data, u16 len, u8 reqtype)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	struct usb_device *udev = rtwusb->udev;
	unsigned int pipe;
	u16 value, index;
	int attempt, ret;

	if (test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
		return;

	value = u32_get_bits(addr, GENMASK(15, 0));
	index = u32_get_bits(addr, GENMASK(23, 16));

	for (attempt = 0; attempt < 10; attempt++) {
		*rtwusb->vendor_req_buf = 0;

		if (reqtype == RTW89_USB_VENQT_READ) {
			pipe = usb_rcvctrlpipe(udev, 0);
		} else { /* RTW89_USB_VENQT_WRITE */
			pipe = usb_sndctrlpipe(udev, 0);

			memcpy(rtwusb->vendor_req_buf, data, len);
		}

		ret = usb_control_msg(udev, pipe, RTW89_USB_VENQT, reqtype,
				      value, index, rtwusb->vendor_req_buf,
				      len, 500);

		if (ret == len) { /* Success */
			atomic_set(&rtwusb->continual_io_error, 0);

			if (reqtype == RTW89_USB_VENQT_READ)
				memcpy(data, rtwusb->vendor_req_buf, len);

			break;
		}

		if (ret == -ESHUTDOWN || ret == -ENODEV)
			set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
		else if (ret < 0)
			rtw89_warn(rtwdev,
				   "usb %s%u 0x%x fail ret=%d value=0x%x attempt=%d\n",
				   str_read_write(reqtype == RTW89_USB_VENQT_READ),
				   len * 8, addr, ret,
				   le32_to_cpup(rtwusb->vendor_req_buf),
				   attempt);
		else if (ret > 0 && reqtype == RTW89_USB_VENQT_READ)
			memcpy(data, rtwusb->vendor_req_buf, len);

		if (atomic_inc_return(&rtwusb->continual_io_error) > 4) {
			set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
			break;
		}
	}
}

static u32 rtw89_usb_read_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 addr32, val32, shift;
	__le32 data = 0;
	int count;

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;

	for (count = 0; ; count++) {
		rtw89_usb_vendorreq(rtwdev, addr32, &data, 4,
				    RTW89_USB_VENQT_READ);

		val32 = le32_to_cpu(data);
		if (val32 != RTW89_R32_DEAD)
			break;

		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "%s: addr %#x = %#x\n",
				   __func__, addr32, val32);
			val32 = RTW89_R32_DEAD;
			break;
		}

		rtw89_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
	}

	return val32 >> shift;
}

static u8 rtw89_usb_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
{
	u8 data = 0;

	if (ACCESS_CMAC(addr))
		return rtw89_usb_read_cmac(rtwdev, addr);

	rtw89_usb_vendorreq(rtwdev, addr, &data, 1, RTW89_USB_VENQT_READ);

	return data;
}

static u16 rtw89_usb_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
{
	__le16 data = 0;

	if (ACCESS_CMAC(addr))
		return rtw89_usb_read_cmac(rtwdev, addr);

	rtw89_usb_vendorreq(rtwdev, addr, &data, 2, RTW89_USB_VENQT_READ);

	return le16_to_cpu(data);
}

static u32 rtw89_usb_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
{
	__le32 data = 0;

	if (ACCESS_CMAC(addr))
		return rtw89_usb_read_cmac(rtwdev, addr);

	rtw89_usb_vendorreq(rtwdev, addr, &data, 4,
			    RTW89_USB_VENQT_READ);

	return le32_to_cpu(data);
}

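/* Register writes go through the same blocking vendor control request as
 * reads; multi-byte values are converted to little endian before being
 * copied into the kmalloc'ed bounce buffer inside rtw89_usb_vendorreq()
 * (a stack buffer would not be DMA-safe for usb_control_msg()).
 */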
static void rtw89_usb_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 val)
{
	u8 data = val;

	rtw89_usb_vendorreq(rtwdev, addr, &data, 1, RTW89_USB_VENQT_WRITE);
}

static void rtw89_usb_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 val)
{
	__le16 data = cpu_to_le16(val);

	rtw89_usb_vendorreq(rtwdev, addr, &data, 2, RTW89_USB_VENQT_WRITE);
}

static void rtw89_usb_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 val)
{
	__le32 data = cpu_to_le32(val);

	rtw89_usb_vendorreq(rtwdev, addr, &data, 4, RTW89_USB_VENQT_WRITE);
}

static u32
rtw89_usb_ops_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
					    u8 txch)
{
	if (txch == RTW89_TXCH_CH12)
		return 1;

	return 42; /* TODO some kind of calculation? */
}

static void rtw89_usb_write_port_complete(struct urb *urb)
{
	struct rtw89_usb_tx_ctrl_block *txcb = urb->context;
	struct rtw89_dev *rtwdev = txcb->rtwdev;
	struct ieee80211_tx_info *info;
	struct rtw89_txwd_body *txdesc;
	struct sk_buff *skb;
	u32 txdesc_size;

	while (true) {
		skb = skb_dequeue(&txcb->tx_ack_queue);
		if (!skb)
			break;

		if (txcb->txch == RTW89_TXCH_CH12) {
			dev_kfree_skb_any(skb);
			continue;
		}

		txdesc = (struct rtw89_txwd_body *)skb->data;

		txdesc_size = rtwdev->chip->txwd_body_size;
		if (le32_get_bits(txdesc->dword0, RTW89_TXWD_BODY0_WD_INFO_EN))
			txdesc_size += rtwdev->chip->txwd_info_size;

		skb_pull(skb, txdesc_size);

		if (rtw89_is_tx_rpt_skb(rtwdev, skb)) {
			if (urb->status == 0)
				rtw89_tx_rpt_skb_add(rtwdev, skb);
			else
				rtw89_tx_rpt_tx_status(rtwdev, skb,
						       RTW89_TX_MACID_DROP);
			continue;
		}

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		if (urb->status == 0) {
			if (info->flags & IEEE80211_TX_CTL_NO_ACK)
				info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
			else
				info->flags |= IEEE80211_TX_STAT_ACK;
		}

		ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
	}

	switch (urb->status) {
	case 0:
	case -EPIPE:
	case -EPROTO:
	case -EINPROGRESS:
	case -ENOENT:
	case -ECONNRESET:
		break;
	default:
		set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
		break;
	}

	kfree(txcb);
}

static int rtw89_usb_write_port(struct rtw89_dev *rtwdev, u8 ch_dma,
				void *data, int len, void *context)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	const struct rtw89_usb_info *info = rtwusb->info;
	struct usb_device *usbd = rtwusb->udev;
	struct urb *urb;
	u8 bulkout_id = info->bulkout_id[ch_dma];
	unsigned int pipe;
	int ret;

	if (test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
		return -ENODEV;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	pipe = usb_sndbulkpipe(usbd, rtwusb->out_pipe[bulkout_id]);

	usb_fill_bulk_urb(urb, usbd, pipe, data, len,
			  rtw89_usb_write_port_complete, context);
	urb->transfer_flags |= URB_ZERO_PACKET;
	usb_anchor_urb(urb, &rtwusb->tx_submitted);

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_unanchor_urb(urb);

	/* release our reference to this URB, USB core will eventually free it
	 * on its own after the completion callback finishes (or URB is
	 * immediately freed here if its submission has failed)
	 */
	usb_free_urb(urb);

	if (ret == -ENODEV)
		set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);

	return ret;
}

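/* Frames on RTW89_TXCH_CH12 carry firmware (H2C) commands allocated by the
 * driver itself, so they are not returned to mac80211 like regular TX skbs.
 */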
static void rtw89_usb_tx_free_skb(struct rtw89_dev *rtwdev, u8 txch,
				  struct sk_buff *skb)
{
	if (txch == RTW89_TXCH_CH12)
		dev_kfree_skb_any(skb);
	else
		ieee80211_free_txskb(rtwdev->hw, skb);
}

static void rtw89_usb_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	struct rtw89_usb_tx_ctrl_block *txcb;
	struct sk_buff *skb;
	int ret;

	while (true) {
		skb = skb_dequeue(&rtwusb->tx_queue[txch]);
		if (!skb)
			break;

		txcb = kmalloc(sizeof(*txcb), GFP_ATOMIC);
		if (!txcb) {
			rtw89_usb_tx_free_skb(rtwdev, txch, skb);
			continue;
		}

		txcb->rtwdev = rtwdev;
		txcb->txch = txch;
		skb_queue_head_init(&txcb->tx_ack_queue);

		skb_queue_tail(&txcb->tx_ack_queue, skb);

		ret = rtw89_usb_write_port(rtwdev, txch, skb->data, skb->len,
					   txcb);
		if (ret) {
			if (ret != -ENODEV)
				rtw89_err(rtwdev, "write port txch %d failed: %d\n",
					  txch, ret);

			skb_dequeue(&txcb->tx_ack_queue);
			kfree(txcb);
			rtw89_usb_tx_free_skb(rtwdev, txch, skb);
		}
	}
}

static int rtw89_usb_tx_write_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	struct sk_buff *skb = tx_req->skb;
	struct sk_buff *skb512;
	u32 txdesc_size = rtwdev->chip->h2c_desc_size;
	void *txdesc;

	if (((desc_info->pkt_size + txdesc_size) % 512) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_HCI, "avoiding multiple of 512\n");

		skb512 = dev_alloc_skb(txdesc_size + desc_info->pkt_size +
				       RTW89_USB_MOD512_PADDING);
		if (!skb512) {
			rtw89_err(rtwdev, "%s: failed to allocate skb\n",
				  __func__);

			return -ENOMEM;
		}

		skb_pull(skb512, txdesc_size);
		skb_put_data(skb512, skb->data, skb->len);
		skb_put_zero(skb512, RTW89_USB_MOD512_PADDING);

		dev_kfree_skb_any(skb);
		skb = skb512;
		tx_req->skb = skb512;

		desc_info->pkt_size += RTW89_USB_MOD512_PADDING;
	}

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	skb_queue_tail(&rtwusb->tx_queue[desc_info->ch_dma], skb);

	return 0;
}

static int rtw89_usb_ops_tx_write(struct rtw89_dev *rtwdev,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	struct rtw89_tx_skb_data *skb_data;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_txwd_body *txdesc;
	u32 txdesc_size;

	if ((desc_info->ch_dma == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (desc_info->ch_dma != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "dma channel %d/TX type %d mismatch\n",
			  desc_info->ch_dma, tx_req->tx_type);
		return -EINVAL;
	}

	if (desc_info->ch_dma == RTW89_TXCH_CH12)
		return rtw89_usb_tx_write_fwcmd(rtwdev, tx_req);

	txdesc_size = rtwdev->chip->txwd_body_size;
	if (desc_info->en_wd_info)
		txdesc_size += rtwdev->chip->txwd_info_size;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc(rtwdev, desc_info, txdesc);

	le32p_replace_bits(&txdesc->dword0, 1, RTW89_TXWD_BODY0_STF_MODE);

	skb_data = RTW89_TX_SKB_CB(skb);
	if (tx_req->desc_info.sn)
		skb_data->tx_rpt_sn = tx_req->desc_info.sn;
	if (tx_req->desc_info.tx_cnt_lmt)
		skb_data->tx_pkt_cnt_lmt = tx_req->desc_info.tx_cnt_lmt;

	skb_queue_tail(&rtwusb->tx_queue[desc_info->ch_dma], skb);

	return 0;
}

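/* RX work: drain the queue of completed bulk-in buffers, parse the RX
 * descriptor, copy the frame into a fresh skb for rtw89_core_rx(), and
 * recycle the receive buffer via rx_free_queue.
 */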
static void rtw89_usb_rx_handler(struct work_struct *work)
{
	struct rtw89_usb *rtwusb = container_of(work, struct rtw89_usb, rx_work);
	struct rtw89_dev *rtwdev = rtwusb->rtwdev;
	struct rtw89_rx_desc_info desc_info;
	struct sk_buff *rx_skb;
	struct sk_buff *skb;
	u32 pkt_offset;
	int limit;

	for (limit = 0; limit < 200; limit++) {
		rx_skb = skb_dequeue(&rtwusb->rx_queue);
		if (!rx_skb)
			break;

		if (skb_queue_len(&rtwusb->rx_queue) >= RTW89_USB_MAX_RXQ_LEN) {
			rtw89_warn(rtwdev, "rx_queue overflow\n");
			goto free_or_reuse;
		}

		memset(&desc_info, 0, sizeof(desc_info));
		rtw89_chip_query_rxdesc(rtwdev, &desc_info, rx_skb->data, 0);

		skb = rtw89_alloc_skb_for_rx(rtwdev, desc_info.pkt_size);
		if (!skb) {
			rtw89_debug(rtwdev, RTW89_DBG_HCI,
				    "failed to allocate RX skb of size %u\n",
				    desc_info.pkt_size);
			goto free_or_reuse;
		}

		pkt_offset = desc_info.offset + desc_info.rxd_len;

		skb_put_data(skb, rx_skb->data + pkt_offset,
			     desc_info.pkt_size);

		rtw89_core_rx(rtwdev, &desc_info, skb);

free_or_reuse:
		if (skb_queue_len(&rtwusb->rx_free_queue) >= RTW89_USB_RX_SKB_NUM)
			dev_kfree_skb_any(rx_skb);
		else
			skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
	}

	if (limit == 200) {
		rtw89_debug(rtwdev, RTW89_DBG_HCI,
			    "left %d rx skbs in the queue for later\n",
			    skb_queue_len(&rtwusb->rx_queue));
		queue_work(rtwusb->rxwq, &rtwusb->rx_work);
	}
}

static void rtw89_usb_rx_resubmit(struct rtw89_usb *rtwusb,
				  struct rtw89_usb_rx_ctrl_block *rxcb,
				  gfp_t gfp)
{
	struct rtw89_dev *rtwdev = rtwusb->rtwdev;
	struct sk_buff *rx_skb;
	int ret;

	rx_skb = skb_dequeue(&rtwusb->rx_free_queue);
	if (!rx_skb)
		rx_skb = alloc_skb(RTW89_USB_RECVBUF_SZ, gfp);

	if (!rx_skb)
		goto try_later;

	skb_reset_tail_pointer(rx_skb);
	rx_skb->len = 0;

	rxcb->rx_skb = rx_skb;

	usb_fill_bulk_urb(rxcb->rx_urb, rtwusb->udev,
			  usb_rcvbulkpipe(rtwusb->udev, rtwusb->in_pipe),
			  rxcb->rx_skb->data, RTW89_USB_RECVBUF_SZ,
			  rtw89_usb_read_port_complete, rxcb);

	ret = usb_submit_urb(rxcb->rx_urb, gfp);
	if (ret) {
		skb_queue_tail(&rtwusb->rx_free_queue, rxcb->rx_skb);

		if (ret == -ENODEV)
			set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
		else
			rtw89_err(rtwdev, "Err sending rx data urb %d\n", ret);

		if (ret == -ENOMEM)
			goto try_later;
	}

	return;

try_later:
	rxcb->rx_skb = NULL;
	queue_work(rtwusb->rxwq, &rtwusb->rx_urb_work);
}

static void rtw89_usb_rx_resubmit_work(struct work_struct *work)
{
	struct rtw89_usb *rtwusb = container_of(work, struct rtw89_usb, rx_urb_work);
	struct rtw89_usb_rx_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW89_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];

		if (!rxcb->rx_skb)
			rtw89_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
	}
}

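/* Bulk-in completion: on success hand the buffer to the RX work for parsing
 * and resubmit the URB; on error return the buffer to the free list and, for
 * fatal statuses, mark the device as unplugged.
 */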
static void rtw89_usb_read_port_complete(struct urb *urb)
{
	struct rtw89_usb_rx_ctrl_block *rxcb = urb->context;
	struct rtw89_dev *rtwdev = rxcb->rtwdev;
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	struct sk_buff *skb = rxcb->rx_skb;

	if (urb->status == 0) {
		if (urb->actual_length > urb->transfer_buffer_length ||
		    urb->actual_length < sizeof(struct rtw89_rxdesc_short)) {
			rtw89_err(rtwdev, "invalid urb length: %d\n",
				  urb->actual_length);
			skb_queue_tail(&rtwusb->rx_free_queue, skb);
		} else {
			skb_put(skb, urb->actual_length);
			skb_queue_tail(&rtwusb->rx_queue, skb);
			queue_work(rtwusb->rxwq, &rtwusb->rx_work);
		}

		rtw89_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
	} else {
		skb_queue_tail(&rtwusb->rx_free_queue, skb);

		if (atomic_inc_return(&rtwusb->continual_io_error) > 4)
			set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);

		switch (urb->status) {
		case -EINVAL:
		case -EPIPE:
		case -ENODEV:
		case -ESHUTDOWN:
			set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
			break;
		case -EPROTO:
		case -EILSEQ:
		case -ETIME:
		case -ECOMM:
		case -EOVERFLOW:
		case -ENOENT:
			break;
		case -EINPROGRESS:
			rtw89_info(rtwdev, "URB is in progress\n");
			break;
		default:
			rtw89_err(rtwdev, "%s status %d\n",
				  __func__, urb->status);
			break;
		}
	}
}

static void rtw89_usb_cancel_rx_bufs(struct rtw89_usb *rtwusb)
{
	struct rtw89_usb_rx_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW89_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];
		usb_kill_urb(rxcb->rx_urb);
	}
}

static void rtw89_usb_cancel_tx_bufs(struct rtw89_usb *rtwusb)
{
	usb_kill_anchored_urbs(&rtwusb->tx_submitted);
}

static void rtw89_usb_free_rx_bufs(struct rtw89_usb *rtwusb)
{
	struct rtw89_usb_rx_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW89_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];
		usb_free_urb(rxcb->rx_urb);
	}
}

static int rtw89_usb_alloc_rx_bufs(struct rtw89_usb *rtwusb)
{
	struct rtw89_usb_rx_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW89_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];

		rxcb->rtwdev = rtwusb->rtwdev;
		rxcb->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rxcb->rx_urb) {
			rtw89_usb_free_rx_bufs(rtwusb);
			return -ENOMEM;
		}
	}

	return 0;
}

static int rtw89_usb_init_rx(struct rtw89_dev *rtwdev)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	struct sk_buff *rx_skb;
	int i;

	rtwusb->rxwq = alloc_workqueue("rtw89_usb: rx wq", WQ_BH, 0);
	if (!rtwusb->rxwq) {
		rtw89_err(rtwdev, "failed to create RX work queue\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&rtwusb->rx_queue);
	skb_queue_head_init(&rtwusb->rx_free_queue);

	INIT_WORK(&rtwusb->rx_work, rtw89_usb_rx_handler);
	INIT_WORK(&rtwusb->rx_urb_work, rtw89_usb_rx_resubmit_work);

	for (i = 0; i < RTW89_USB_RX_SKB_NUM; i++) {
		rx_skb = alloc_skb(RTW89_USB_RECVBUF_SZ, GFP_KERNEL);
		if (rx_skb)
			skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
	}

	return 0;
}

static void rtw89_usb_deinit_rx(struct rtw89_dev *rtwdev)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);

	skb_queue_purge(&rtwusb->rx_queue);

	destroy_workqueue(rtwusb->rxwq);

	skb_queue_purge(&rtwusb->rx_free_queue);
}

static void rtw89_usb_start_rx(struct rtw89_dev *rtwdev)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	int i;

	for (i = 0; i < RTW89_USB_RXCB_NUM; i++)
		rtw89_usb_rx_resubmit(rtwusb, &rtwusb->rx_cb[i], GFP_KERNEL);
}

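/* TX skbs are staged in one queue per DMA channel by the tx_write ops and
 * submitted to the matching bulk-out endpoint from tx_kick_off.
 */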
static void rtw89_usb_init_tx(struct rtw89_dev *rtwdev)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
		skb_queue_head_init(&rtwusb->tx_queue[i]);
}

static void rtw89_usb_deinit_tx(struct rtw89_dev *rtwdev)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++) {
		if (i == RTW89_TXCH_CH12)
			skb_queue_purge(&rtwusb->tx_queue[i]);
		else
			ieee80211_purge_tx_queue(rtwdev->hw, &rtwusb->tx_queue[i]);
	}
}

static void rtw89_usb_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);

	rtw89_usb_cancel_tx_bufs(rtwusb);
	rtw89_tx_rpt_skbs_purge(rtwdev);
}

static int rtw89_usb_ops_start(struct rtw89_dev *rtwdev)
{
	return 0; /* Nothing to do. */
}

static void rtw89_usb_ops_stop(struct rtw89_dev *rtwdev)
{
	/* Nothing to do. */
}

static void rtw89_usb_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	/* Nothing to do? */
}

static void rtw89_usb_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	/* Nothing to do. */
}

static int rtw89_usb_ops_deinit(struct rtw89_dev *rtwdev)
{
	return 0; /* Nothing to do. */
}

static int rtw89_usb_ops_mac_pre_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	const struct rtw89_usb_info *info = rtwusb->info;
	u32 val32;

	rtw89_write32_set(rtwdev, info->usb_host_request_2,
			  B_AX_R_USBIO_MODE);

	/* fix USB IO hang, suggested by chihhanli@realtek.com */
	rtw89_write32_clr(rtwdev, info->usb_wlan0_1,
			  B_AX_USBRX_RST | B_AX_USBTX_RST);

	val32 = rtw89_read32(rtwdev, info->hci_func_en);
	val32 &= ~(B_AX_HCI_RXDMA_EN | B_AX_HCI_TXDMA_EN);
	rtw89_write32(rtwdev, info->hci_func_en, val32);

	val32 |= B_AX_HCI_RXDMA_EN | B_AX_HCI_TXDMA_EN;
	rtw89_write32(rtwdev, info->hci_func_en, val32);
	/* fix USB TRX hang, suggested by chihhanli@realtek.com */

	return 0;
}

static int rtw89_usb_ops_mac_pre_deinit(struct rtw89_dev *rtwdev)
{
	return 0; /* Nothing to do. */
}

static int rtw89_usb_ops_mac_post_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	const struct rtw89_usb_info *info = rtwusb->info;
	enum usb_device_speed speed;
	u32 ep;

	rtw89_write32_clr(rtwdev, info->usb3_mac_npi_config_intf_0,
			  B_AX_SSPHY_LFPS_FILTER);

	speed = rtwusb->udev->speed;

	if (speed == USB_SPEED_SUPER)
		rtw89_write8(rtwdev, R_AX_RXDMA_SETTING, USB3_BULKSIZE);
	else if (speed == USB_SPEED_HIGH)
		rtw89_write8(rtwdev, R_AX_RXDMA_SETTING, USB2_BULKSIZE);
	else
		rtw89_write8(rtwdev, R_AX_RXDMA_SETTING, USB11_BULKSIZE);

	for (ep = 5; ep <= 12; ep++) {
		if (ep == 8)
			continue;

		rtw89_write8_mask(rtwdev, info->usb_endpoint_0,
				  B_AX_EP_IDX, ep);
		rtw89_write8(rtwdev, info->usb_endpoint_2 + 1, NUMP);
	}

	return 0;
}

static void rtw89_usb_ops_recalc_int_mit(struct rtw89_dev *rtwdev)
{
	/* Nothing to do. */
}

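/* Level 1 recovery: step 1 asserts the USB RX/TX reset bits and waits,
 * step 2 releases them again. The register and bit layout differs between
 * chip generations.
 */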
static int rtw89_usb_ops_mac_lv1_rcvy(struct rtw89_dev *rtwdev,
				      enum rtw89_lv1_rcvy_step step)
{
	u32 reg, mask;

	switch (rtwdev->chip->chip_id) {
	case RTL8851B:
	case RTL8852A:
	case RTL8852B:
		reg = R_AX_USB_WLAN0_1;
		mask = B_AX_USBRX_RST | B_AX_USBTX_RST;
		break;
	case RTL8852C:
		reg = R_AX_USB_WLAN0_1_V1;
		mask = B_AX_USBRX_RST_V1 | B_AX_USBTX_RST_V1;
		break;
	default:
		rtw89_err(rtwdev, "%s: fix me\n", __func__);
		return -EOPNOTSUPP;
	}

	switch (step) {
	case RTW89_LV1_RCVY_STEP_1:
		rtw89_write32_set(rtwdev, reg, mask);

		msleep(30);
		break;
	case RTW89_LV1_RCVY_STEP_2:
		rtw89_write32_clr(rtwdev, reg, mask);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void rtw89_usb_ops_dump_err_status(struct rtw89_dev *rtwdev)
{
	rtw89_warn(rtwdev, "%s TODO\n", __func__);
}

static const struct rtw89_hci_ops rtw89_usb_ops = {
	.tx_write = rtw89_usb_ops_tx_write,
	.tx_kick_off = rtw89_usb_ops_tx_kick_off,
	.flush_queues = NULL, /* Not needed? */
	.reset = rtw89_usb_ops_reset,
	.start = rtw89_usb_ops_start,
	.stop = rtw89_usb_ops_stop,
	.pause = rtw89_usb_ops_pause,
	.switch_mode = rtw89_usb_ops_switch_mode,
	.recalc_int_mit = rtw89_usb_ops_recalc_int_mit,

	.read8 = rtw89_usb_ops_read8,
	.read16 = rtw89_usb_ops_read16,
	.read32 = rtw89_usb_ops_read32,
	.write8 = rtw89_usb_ops_write8,
	.write16 = rtw89_usb_ops_write16,
	.write32 = rtw89_usb_ops_write32,

	.mac_pre_init = rtw89_usb_ops_mac_pre_init,
	.mac_pre_deinit = rtw89_usb_ops_mac_pre_deinit,
	.mac_post_init = rtw89_usb_ops_mac_post_init,
	.deinit = rtw89_usb_ops_deinit,

	.check_and_reclaim_tx_resource = rtw89_usb_ops_check_and_reclaim_tx_resource,
	.mac_lv1_rcvy = rtw89_usb_ops_mac_lv1_rcvy,
	.dump_err_status = rtw89_usb_ops_dump_err_status,
	.napi_poll = NULL,

	.recovery_start = NULL,
	.recovery_complete = NULL,

	.ctrl_txdma_ch = NULL,
	.ctrl_txdma_fw_ch = NULL,
	.ctrl_trxhci = NULL,
	.poll_txdma_ch_idle = NULL,

	.clr_idx_all = NULL,
	.clear = NULL,
	.disable_intr = NULL,
	.enable_intr = NULL,
	.rst_bdram = NULL,
};

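/* Find the single bulk-in endpoint and up to RTW89_MAX_BULKOUT_NUM bulk-out
 * endpoints of the interface and record their endpoint numbers.
 */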
static int rtw89_usb_parse(struct rtw89_dev *rtwdev,
			   struct usb_interface *intf)
{
	struct usb_host_interface *host_interface = &intf->altsetting[0];
	struct usb_interface_descriptor *intf_desc = &host_interface->desc;
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	struct usb_endpoint_descriptor *endpoint;
	int num_out_pipes = 0;
	u8 num;
	int i;

	if (intf_desc->bNumEndpoints > RTW89_MAX_ENDPOINT_NUM) {
		rtw89_err(rtwdev, "found %d endpoints, expected %d max\n",
			  intf_desc->bNumEndpoints, RTW89_MAX_ENDPOINT_NUM);
		return -EINVAL;
	}

	for (i = 0; i < intf_desc->bNumEndpoints; i++) {
		endpoint = &host_interface->endpoint[i].desc;
		num = usb_endpoint_num(endpoint);

		if (usb_endpoint_dir_in(endpoint) &&
		    usb_endpoint_xfer_bulk(endpoint)) {
			if (rtwusb->in_pipe) {
				rtw89_err(rtwdev,
					  "found more than 1 bulk in endpoint\n");
				return -EINVAL;
			}

			rtwusb->in_pipe = num;
		}

		if (usb_endpoint_dir_out(endpoint) &&
		    usb_endpoint_xfer_bulk(endpoint)) {
			if (num_out_pipes >= RTW89_MAX_BULKOUT_NUM) {
				rtw89_err(rtwdev,
					  "found more than %d bulk out endpoints\n",
					  RTW89_MAX_BULKOUT_NUM);
				return -EINVAL;
			}

			rtwusb->out_pipe[num_out_pipes++] = num;
		}
	}

	if (num_out_pipes < 1) {
		rtw89_err(rtwdev, "no bulk out endpoints found\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw89_usb_intf_init(struct rtw89_dev *rtwdev,
			       struct usb_interface *intf)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
	int ret;

	init_usb_anchor(&rtwusb->tx_submitted);

	ret = rtw89_usb_parse(rtwdev, intf);
	if (ret)
		return ret;

	rtwusb->vendor_req_buf = kmalloc(sizeof(*rtwusb->vendor_req_buf),
					 GFP_KERNEL);
	if (!rtwusb->vendor_req_buf)
		return -ENOMEM;

	rtwusb->udev = usb_get_dev(interface_to_usbdev(intf));

	usb_set_intfdata(intf, rtwdev->hw);

	SET_IEEE80211_DEV(rtwdev->hw, &intf->dev);

	return 0;
}

static void rtw89_usb_intf_deinit(struct rtw89_dev *rtwdev,
				  struct usb_interface *intf)
{
	struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);

	usb_put_dev(rtwusb->udev);
	kfree(rtwusb->vendor_req_buf);
	usb_set_intfdata(intf, NULL);
}

int rtw89_usb_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	const struct rtw89_driver_info *info;
	struct rtw89_dev *rtwdev;
	struct rtw89_usb *rtwusb;
	int ret;

	info = (const struct rtw89_driver_info *)id->driver_info;

	rtwdev = rtw89_alloc_ieee80211_hw(&intf->dev,
					  sizeof(struct rtw89_usb),
					  info->chip, info->variant);
	if (!rtwdev) {
		dev_err(&intf->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwusb = rtw89_usb_priv(rtwdev);
	rtwusb->rtwdev = rtwdev;
	rtwusb->info = info->bus.usb;

	rtwdev->hci.ops = &rtw89_usb_ops;
	rtwdev->hci.type = RTW89_HCI_TYPE_USB;
	rtwdev->hci.tx_rpt_enabled = true;

	ret = rtw89_usb_intf_init(rtwdev, intf);
	if (ret) {
		rtw89_err(rtwdev, "failed to initialise intf: %d\n", ret);
		goto err_free_hw;
	}

	if (rtwusb->udev->speed == USB_SPEED_SUPER)
		rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_USB3;
	else
		rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_USB2;

	rtw89_usb_init_tx(rtwdev);

	ret = rtw89_usb_alloc_rx_bufs(rtwusb);
	if (ret)
		goto err_intf_deinit;

	ret = rtw89_usb_init_rx(rtwdev);
	if (ret)
		goto err_free_rx_bufs;

	ret = rtw89_core_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to initialise core: %d\n", ret);
		goto err_deinit_rx;
	}

	ret = rtw89_chip_info_setup(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup chip information\n");
		goto err_core_deinit;
	}

	ret = rtw89_core_register(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to register core\n");
		goto err_core_deinit;
	}

	rtw89_usb_start_rx(rtwdev);

	set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);

	return 0;

err_core_deinit:
	rtw89_core_deinit(rtwdev);
err_deinit_rx:
	rtw89_usb_deinit_rx(rtwdev);
err_free_rx_bufs:
	rtw89_usb_free_rx_bufs(rtwusb);
err_intf_deinit:
	rtw89_usb_intf_deinit(rtwdev, intf);
err_free_hw:
	rtw89_free_ieee80211_hw(rtwdev);

	return ret;
}
EXPORT_SYMBOL(rtw89_usb_probe);

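/* Teardown mirrors probe in reverse order, after first killing all
 * outstanding RX and TX URBs so no completions run while the core is
 * being unregistered.
 */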
void rtw89_usb_disconnect(struct usb_interface *intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(intf);
	struct rtw89_dev *rtwdev;
	struct rtw89_usb *rtwusb;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwusb = rtw89_usb_priv(rtwdev);

	rtw89_usb_cancel_rx_bufs(rtwusb);
	rtw89_usb_cancel_tx_bufs(rtwusb);

	rtw89_core_unregister(rtwdev);
	rtw89_core_deinit(rtwdev);
	rtw89_usb_deinit_rx(rtwdev);
	rtw89_usb_free_rx_bufs(rtwusb);
	rtw89_usb_deinit_tx(rtwdev);
	rtw89_usb_intf_deinit(rtwdev, intf);
	rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_usb_disconnect);

MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
MODULE_DESCRIPTION("Realtek USB 802.11ax wireless driver");
MODULE_LICENSE("Dual BSD/GPL");