// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include "main.h"
#include "debug.h"
#include "mac.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "usb.h"

static bool rtw_switch_usb_mode = true;
module_param_named(switch_usb_mode, rtw_switch_usb_mode, bool, 0644);
MODULE_PARM_DESC(switch_usb_mode,
		 "Set to N to disable switching to USB 3 mode to avoid potential interference in the 2.4 GHz band (default: Y)");

#define RTW_USB_MAX_RXQ_LEN	512

struct rtw_usb_txcb {
	struct rtw_dev *rtwdev;
	struct sk_buff_head tx_ack_queue;
};

static void rtw_usb_fill_tx_checksum(struct rtw_usb *rtwusb,
				     struct sk_buff *skb, int agg_num)
{
	struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)skb->data;
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	struct rtw_tx_pkt_info pkt_info;

	le32p_replace_bits(&tx_desc->w7, agg_num, RTW_TX_DESC_W7_DMA_TXAGG_NUM);
	pkt_info.pkt_offset = le32_get_bits(tx_desc->w1, RTW_TX_DESC_W1_PKT_OFFSET);
	rtw_tx_fill_txdesc_checksum(rtwdev, &pkt_info, skb->data);
}

static void rtw_usb_reg_sec(struct rtw_dev *rtwdev, u32 addr, __le32 *data)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *udev = rtwusb->udev;
	bool reg_on_section = false;
	u16 t_reg = 0x4e0;
	u8 t_len = 1;
	int status;

	/* There are three sections:
	 * 1. on (0x00~0xFF; 0x1000~0x10FF): this section is always powered on
	 * 2. off (< 0xFE00, excluding "on" section): this section could be
	 *    powered off
	 * 3. local (>= 0xFE00): usb specific registers section
	 */
	if (addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff))
		reg_on_section = true;

	if (!reg_on_section)
		return;

	status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
				 t_reg, 0, data, t_len, 500);

	if (status != t_len && status != -ENODEV)
		rtw_err(rtwdev, "%s: reg 0x%x, usb write %u fail, status: %d\n",
			__func__, t_reg, t_len, status);
}

static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *udev = rtwusb->udev;
	__le32 *data;
	unsigned long flags;
	int idx, ret;
	static int count;

	spin_lock_irqsave(&rtwusb->usb_lock, flags);

	idx = rtwusb->usb_data_index;
	rtwusb->usb_data_index = (idx + 1) & (RTW_USB_MAX_RXTX_COUNT - 1);

	spin_unlock_irqrestore(&rtwusb->usb_lock, flags);

	data = &rtwusb->usb_data[idx];

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      RTW_USB_CMD_REQ, RTW_USB_CMD_READ, addr,
			      RTW_USB_VENQT_CMD_IDX, data, len, 1000);
	if (ret < 0 && ret != -ENODEV && count++ < 4)
		rtw_err(rtwdev, "read register 0x%x failed with %d\n",
			addr, ret);

	if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
	    rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_usb_reg_sec(rtwdev, addr, data);

	return le32_to_cpu(*data);
}

static u8 rtw_usb_read8(struct rtw_dev *rtwdev, u32 addr)
{
	return (u8)rtw_usb_read(rtwdev, addr, 1);
}

static u16 rtw_usb_read16(struct rtw_dev *rtwdev, u32 addr)
{
	return (u16)rtw_usb_read(rtwdev, addr, 2);
}

static u32 rtw_usb_read32(struct rtw_dev *rtwdev, u32 addr)
{
	return (u32)rtw_usb_read(rtwdev, addr, 4);
}

static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
{
	struct rtw_usb *rtwusb = (struct rtw_usb *)rtwdev->priv;
	struct usb_device *udev = rtwusb->udev;
	unsigned long flags;
	__le32 *data;
	int idx, ret;
	static int count;

	spin_lock_irqsave(&rtwusb->usb_lock, flags);

	idx = rtwusb->usb_data_index;
	rtwusb->usb_data_index = (idx + 1) & (RTW_USB_MAX_RXTX_COUNT - 1);

	spin_unlock_irqrestore(&rtwusb->usb_lock, flags);

	data = &rtwusb->usb_data[idx];

	*data = cpu_to_le32(val);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
			      addr, 0, data, len, 30000);
	if (ret < 0 && ret != -ENODEV && count++ < 4)
		rtw_err(rtwdev, "write register 0x%x failed with %d\n",
			addr, ret);

	if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
	    rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_usb_reg_sec(rtwdev, addr, data);
}

static void rtw_usb_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	rtw_usb_write(rtwdev, addr, val, 1);
}

static void rtw_usb_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	rtw_usb_write(rtwdev, addr, val, 2);
}

static void rtw_usb_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	rtw_usb_write(rtwdev, addr, val, 4);
}

static int dma_mapping_to_ep(enum rtw_dma_mapping dma_mapping)
{
	switch (dma_mapping) {
	case RTW_DMA_MAPPING_HIGH:
		return 0;
	case RTW_DMA_MAPPING_NORMAL:
		return 1;
	case RTW_DMA_MAPPING_LOW:
		return 2;
	case RTW_DMA_MAPPING_EXTRA:
		return 3;
	default:
		return -EINVAL;
	}
}

static int rtw_usb_parse(struct rtw_dev *rtwdev,
			 struct usb_interface *interface)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_host_interface *host_interface = &interface->altsetting[0];
	struct usb_interface_descriptor *interface_desc = &host_interface->desc;
	struct usb_endpoint_descriptor *endpoint;
	int num_out_pipes = 0;
	int i;
	u8 num;
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn;

	for (i = 0; i < interface_desc->bNumEndpoints; i++) {
		endpoint = &host_interface->endpoint[i].desc;
		num = usb_endpoint_num(endpoint);

		if (usb_endpoint_dir_in(endpoint) &&
		    usb_endpoint_xfer_bulk(endpoint)) {
			if (rtwusb->pipe_in) {
				rtw_err(rtwdev, "IN pipes overflow\n");
				return -EINVAL;
			}

			rtwusb->pipe_in = num;
		}

		if (usb_endpoint_dir_in(endpoint) &&
		    usb_endpoint_xfer_int(endpoint)) {
			if (rtwusb->pipe_interrupt) {
				rtw_err(rtwdev, "INT pipes overflow\n");
				return -EINVAL;
			}

			rtwusb->pipe_interrupt = num;
		}

		if (usb_endpoint_dir_out(endpoint) &&
		    usb_endpoint_xfer_bulk(endpoint)) {
			if (num_out_pipes >= ARRAY_SIZE(rtwusb->out_ep)) {
				rtw_err(rtwdev, "OUT pipes overflow\n");
				return -EINVAL;
			}

			rtwusb->out_ep[num_out_pipes++] = num;
		}
	}

	rtwdev->hci.bulkout_num = num_out_pipes;

	if (num_out_pipes < 1 || num_out_pipes > 4) {
		rtw_err(rtwdev, "invalid number of endpoints %d\n", num_out_pipes);
		return -EINVAL;
	}

	rqpn = &chip->rqpn_table[num_out_pipes];

	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID0] = dma_mapping_to_ep(rqpn->dma_map_be);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID1] = dma_mapping_to_ep(rqpn->dma_map_bk);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID2] = dma_mapping_to_ep(rqpn->dma_map_bk);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID3] = dma_mapping_to_ep(rqpn->dma_map_be);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID4] = dma_mapping_to_ep(rqpn->dma_map_vi);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID5] = dma_mapping_to_ep(rqpn->dma_map_vi);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID6] = dma_mapping_to_ep(rqpn->dma_map_vo);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID7] = dma_mapping_to_ep(rqpn->dma_map_vo);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID8] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID9] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID10] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID11] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID12] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID13] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID14] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID15] = -EINVAL;
	rtwusb->qsel_to_ep[TX_DESC_QSEL_BEACON] = dma_mapping_to_ep(rqpn->dma_map_hi);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_HIGH] = dma_mapping_to_ep(rqpn->dma_map_hi);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_MGMT] = dma_mapping_to_ep(rqpn->dma_map_mg);
	rtwusb->qsel_to_ep[TX_DESC_QSEL_H2C] = dma_mapping_to_ep(rqpn->dma_map_hi);

	return 0;
}

static void rtw_usb_write_port_tx_complete(struct urb *urb)
{
	struct rtw_usb_txcb *txcb = urb->context;
	struct rtw_dev *rtwdev = txcb->rtwdev;
	struct ieee80211_hw *hw = rtwdev->hw;

	while (true) {
		struct sk_buff *skb = skb_dequeue(&txcb->tx_ack_queue);
		struct ieee80211_tx_info *info;
		struct rtw_usb_tx_data *tx_data;

		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		tx_data = rtw_usb_get_tx_data(skb);

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		ieee80211_tx_info_clear_status(info);
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_irqsafe(hw, skb);
	}

	kfree(txcb);
}

static int qsel_to_ep(struct rtw_usb *rtwusb, unsigned int qsel)
{
	if (qsel >= ARRAY_SIZE(rtwusb->qsel_to_ep))
		return -EINVAL;

	return rtwusb->qsel_to_ep[qsel];
}

static int rtw_usb_write_port(struct rtw_dev *rtwdev, u8 qsel, struct sk_buff *skb,
			      usb_complete_t cb, void *context)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *usbd = rtwusb->udev;
	struct urb *urb;
	unsigned int pipe;
	int ret;
	int ep = qsel_to_ep(rtwusb, qsel);

	if (ep < 0)
		return ep;

	pipe = usb_sndbulkpipe(usbd, rtwusb->out_ep[ep]);
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, usbd, pipe, skb->data, skb->len, cb, context);
	urb->transfer_flags |= URB_ZERO_PACKET;
	ret = usb_submit_urb(urb, GFP_ATOMIC);

	usb_free_urb(urb);

	return ret;
}

static bool rtw_usb_tx_agg_skb(struct rtw_usb *rtwusb, struct sk_buff_head *list)
{
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	struct rtw_tx_desc *tx_desc;
	struct rtw_usb_txcb *txcb;
	struct sk_buff *skb_head;
	struct sk_buff *skb_iter;
	int agg_num = 0;
	unsigned int align_next = 0;
	u8 qsel;

	if (skb_queue_empty(list))
		return false;

	txcb = kmalloc(sizeof(*txcb), GFP_ATOMIC);
	if (!txcb)
		return false;

	txcb->rtwdev = rtwdev;
	skb_queue_head_init(&txcb->tx_ack_queue);

	skb_iter = skb_dequeue(list);

	if (skb_queue_empty(list)) {
		skb_head = skb_iter;
		goto queue;
	}

	skb_head = dev_alloc_skb(RTW_USB_MAX_XMITBUF_SZ);
	if (!skb_head) {
		skb_head = skb_iter;
		goto queue;
	}

	while (skb_iter) {
		unsigned long flags;

		skb_put(skb_head, align_next);
		skb_put_data(skb_head, skb_iter->data, skb_iter->len);

		align_next = ALIGN(skb_iter->len, 8) - skb_iter->len;

		agg_num++;

		skb_queue_tail(&txcb->tx_ack_queue, skb_iter);

		spin_lock_irqsave(&list->lock, flags);

		skb_iter = skb_peek(list);

		if (skb_iter &&
		    skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ &&
		    agg_num < rtwdev->chip->usb_tx_agg_desc_num)
			__skb_unlink(skb_iter, list);
		else
			skb_iter = NULL;
		spin_unlock_irqrestore(&list->lock, flags);
	}

	if (agg_num > 1)
		rtw_usb_fill_tx_checksum(rtwusb, skb_head, agg_num);

queue:
	skb_queue_tail(&txcb->tx_ack_queue, skb_head);
	tx_desc = (struct rtw_tx_desc *)skb_head->data;
	qsel = le32_get_bits(tx_desc->w1, RTW_TX_DESC_W1_QSEL);

	rtw_usb_write_port(rtwdev, qsel, skb_head, rtw_usb_write_port_tx_complete, txcb);

	return true;
}

static void rtw_usb_tx_handler(struct work_struct *work)
{
	struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, tx_work);
	int i, limit;

	for (i = ARRAY_SIZE(rtwusb->tx_queue) - 1; i >= 0; i--) {
		for (limit = 0; limit < 200; limit++) {
			struct sk_buff_head *list = &rtwusb->tx_queue[i];

			if (!rtw_usb_tx_agg_skb(rtwusb, list))
				break;
		}
	}
}

static void rtw_usb_tx_queue_purge(struct rtw_usb *rtwusb)
{
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
		ieee80211_purge_tx_queue(rtwdev->hw, &rtwusb->tx_queue[i]);
}

static void rtw_usb_write_port_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;

	dev_kfree_skb_any(skb);
}

static int rtw_usb_write_data(struct rtw_dev *rtwdev,
			      struct rtw_tx_pkt_info *pkt_info,
			      u8 *buf)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	unsigned int size;
	u8 qsel;
	int ret = 0;

	size = pkt_info->tx_pkt_size;
	qsel = pkt_info->qsel;

	skb = dev_alloc_skb(chip->tx_pkt_desc_sz + size);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	skb_push(skb, chip->tx_pkt_desc_sz);
	memset(skb->data, 0, chip->tx_pkt_desc_sz);
	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);

	ret = rtw_usb_write_port(rtwdev, qsel, skb,
				 rtw_usb_write_port_complete, skb);
	if (unlikely(ret))
		rtw_err(rtwdev, "failed to do USB write, ret=%d\n", ret);

	return ret;
}

static int rtw_usb_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_tx_pkt_info pkt_info = {0};

	pkt_info.tx_pkt_size = size;
	pkt_info.qsel = TX_DESC_QSEL_BEACON;
	pkt_info.offset = chip->tx_pkt_desc_sz;
	pkt_info.ls = true;

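	/* Reserved page downloads are tagged with the beacon queue selector,
	 * which rtw_usb_parse() maps to the endpoint of the high priority
	 * DMA queue.
	 */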
	return rtw_usb_write_data(rtwdev, &pkt_info, buf);
}

static int rtw_usb_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct rtw_tx_pkt_info pkt_info = {0};

	pkt_info.tx_pkt_size = size;
	pkt_info.qsel = TX_DESC_QSEL_H2C;

	return rtw_usb_write_data(rtwdev, &pkt_info, buf);
}

static u8 rtw_usb_tx_queue_mapping_to_qsel(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 qsel;

	if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		qsel = TX_DESC_QSEL_MGMT;
	else if (is_broadcast_ether_addr(hdr->addr1) ||
		 is_multicast_ether_addr(hdr->addr1))
		qsel = TX_DESC_QSEL_HIGH;
	else if (skb_get_queue_mapping(skb) <= IEEE80211_AC_BK)
		qsel = skb->priority;
	else
		qsel = TX_DESC_QSEL_BEACON;

	return qsel;
}

static int rtw_usb_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_usb_tx_data *tx_data;
	u8 *pkt_desc;
	int ep;

	pkt_info->qsel = rtw_usb_tx_queue_mapping_to_qsel(skb);
	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
	ep = qsel_to_ep(rtwusb, pkt_info->qsel);
	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);
	tx_data = rtw_usb_get_tx_data(skb);
	tx_data->sn = pkt_info->sn;

	skb_queue_tail(&rtwusb->tx_queue[ep], skb);

	return 0;
}

static void rtw_usb_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);

	queue_work(rtwusb->txwq, &rtwusb->tx_work);
}

static void rtw_usb_rx_handler(struct work_struct *work)
{
	struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_work);
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	struct ieee80211_rx_status rx_status;
	struct rtw_rx_pkt_stat pkt_stat;
	struct sk_buff *rx_skb;
	struct sk_buff *skb;
	u32 pkt_desc_sz = rtwdev->chip->rx_pkt_desc_sz;
	u32 max_skb_len = pkt_desc_sz + PHY_STATUS_SIZE * 8 +
			  IEEE80211_MAX_MPDU_LEN_VHT_11454;
	u32 pkt_offset, next_pkt, skb_len;
	u8 *rx_desc;
	int limit;

	for (limit = 0; limit < 200; limit++) {
		rx_skb = skb_dequeue(&rtwusb->rx_queue);
		if (!rx_skb)
			break;

		if (skb_queue_len(&rtwusb->rx_queue) >= RTW_USB_MAX_RXQ_LEN) {
			dev_dbg_ratelimited(rtwdev->dev, "failed to get rx_queue, overflow\n");
			dev_kfree_skb_any(rx_skb);
			continue;
		}

		rx_desc = rx_skb->data;

		do {
			rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat,
					     &rx_status);
			pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
				     pkt_stat.shift;

			skb_len = pkt_stat.pkt_len + pkt_offset;
			if (skb_len > max_skb_len) {
				rtw_dbg(rtwdev, RTW_DBG_USB,
					"skipping too big packet: %u\n",
					skb_len);
				goto skip_packet;
			}

			skb = alloc_skb(skb_len, GFP_ATOMIC);
			if (!skb) {
				rtw_dbg(rtwdev, RTW_DBG_USB,
					"failed to allocate RX skb of size %u\n",
					skb_len);
				goto skip_packet;
			}

			skb_put_data(skb, rx_desc, skb_len);

			if (pkt_stat.is_c2h) {
				rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, skb);
			} else {
				skb_pull(skb, pkt_offset);
				rtw_update_rx_freq_for_invalid(rtwdev, skb,
							       &rx_status,
							       &pkt_stat);
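				/* mac80211 reads the RX status from skb->cb,
				 * so it must be filled in before
				 * ieee80211_rx_irqsafe() takes the skb.
				 */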
				rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
				memcpy(skb->cb, &rx_status, sizeof(rx_status));
				ieee80211_rx_irqsafe(rtwdev->hw, skb);
			}

skip_packet:
			next_pkt = round_up(skb_len, 8);
			rx_desc += next_pkt;
		} while (rx_desc + pkt_desc_sz < rx_skb->data + rx_skb->len);

		if (skb_queue_len(&rtwusb->rx_free_queue) >= RTW_USB_RX_SKB_NUM)
			dev_kfree_skb_any(rx_skb);
		else
			skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
	}
}

static void rtw_usb_read_port_complete(struct urb *urb);

static void rtw_usb_rx_resubmit(struct rtw_usb *rtwusb,
				struct rx_usb_ctrl_block *rxcb,
				gfp_t gfp)
{
	struct rtw_dev *rtwdev = rtwusb->rtwdev;
	struct sk_buff *rx_skb;
	int error;

	rx_skb = skb_dequeue(&rtwusb->rx_free_queue);
	if (!rx_skb)
		rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, gfp);

	if (!rx_skb)
		goto try_later;

	skb_reset_tail_pointer(rx_skb);
	rx_skb->len = 0;

	rxcb->rx_skb = rx_skb;

	usb_fill_bulk_urb(rxcb->rx_urb, rtwusb->udev,
			  usb_rcvbulkpipe(rtwusb->udev, rtwusb->pipe_in),
			  rxcb->rx_skb->data, RTW_USB_MAX_RECVBUF_SZ,
			  rtw_usb_read_port_complete, rxcb);

	error = usb_submit_urb(rxcb->rx_urb, gfp);
	if (error) {
		skb_queue_tail(&rtwusb->rx_free_queue, rxcb->rx_skb);

		if (error != -ENODEV)
			rtw_err(rtwdev, "Err sending rx data urb %d\n",
				error);

		if (error == -ENOMEM)
			goto try_later;
	}

	return;

try_later:
	rxcb->rx_skb = NULL;
	queue_work(rtwusb->rxwq, &rtwusb->rx_urb_work);
}

static void rtw_usb_rx_resubmit_work(struct work_struct *work)
{
	struct rtw_usb *rtwusb = container_of(work, struct rtw_usb, rx_urb_work);
	struct rx_usb_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];

		if (!rxcb->rx_skb)
			rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
	}
}

static void rtw_usb_read_port_complete(struct urb *urb)
{
	struct rx_usb_ctrl_block *rxcb = urb->context;
	struct rtw_dev *rtwdev = rxcb->rtwdev;
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct sk_buff *skb = rxcb->rx_skb;

	if (urb->status == 0) {
		if (urb->actual_length >= RTW_USB_MAX_RECVBUF_SZ ||
		    urb->actual_length < 24) {
			rtw_err(rtwdev, "failed to get urb length:%d\n",
				urb->actual_length);
			skb_queue_tail(&rtwusb->rx_free_queue, skb);
		} else {
			skb_put(skb, urb->actual_length);
			skb_queue_tail(&rtwusb->rx_queue, skb);
			queue_work(rtwusb->rxwq, &rtwusb->rx_work);
		}
		rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
	} else {
		skb_queue_tail(&rtwusb->rx_free_queue, skb);

		switch (urb->status) {
		case -EINVAL:
		case -EPIPE:
		case -ENODEV:
		case -ESHUTDOWN:
		case -ENOENT:
		case -EPROTO:
		case -EILSEQ:
		case -ETIME:
		case -ECOMM:
		case -EOVERFLOW:
		case -EINPROGRESS:
			break;
		default:
			rtw_err(rtwdev, "status %d\n", urb->status);
			break;
		}
	}
}

static void rtw_usb_cancel_rx_bufs(struct rtw_usb *rtwusb)
{
	struct rx_usb_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];
		usb_kill_urb(rxcb->rx_urb);
	}
}

static void rtw_usb_free_rx_bufs(struct rtw_usb *rtwusb)
{
	struct rx_usb_ctrl_block *rxcb;
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		rxcb = &rtwusb->rx_cb[i];
		usb_kill_urb(rxcb->rx_urb);
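		/* usb_kill_urb() is synchronous, so the completion handler
		 * cannot run anymore by the time the URB is freed below.
		 */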
		usb_free_urb(rxcb->rx_urb);
	}
}

static int rtw_usb_alloc_rx_bufs(struct rtw_usb *rtwusb)
{
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];

		rxcb->rtwdev = rtwusb->rtwdev;
		rxcb->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rxcb->rx_urb)
			goto err;
	}

	return 0;
err:
	rtw_usb_free_rx_bufs(rtwusb);
	return -ENOMEM;
}

static int rtw_usb_setup(struct rtw_dev *rtwdev)
{
	/* empty function for rtw_hci_ops */
	return 0;
}

static int rtw_usb_start(struct rtw_dev *rtwdev)
{
	return 0;
}

static void rtw_usb_stop(struct rtw_dev *rtwdev)
{
}

static void rtw_usb_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	/* empty function for rtw_hci_ops */
}

static void rtw_usb_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	/* empty function for rtw_hci_ops */
}

static void rtw_usb_init_burst_pkt_len(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	enum usb_device_speed speed = rtwusb->udev->speed;
	u8 rxdma, burst_size;

	rxdma = BIT_DMA_BURST_CNT | BIT_DMA_MODE;

	if (speed == USB_SPEED_SUPER)
		burst_size = BIT_DMA_BURST_SIZE_1024;
	else if (speed == USB_SPEED_HIGH)
		burst_size = BIT_DMA_BURST_SIZE_512;
	else
		burst_size = BIT_DMA_BURST_SIZE_64;

	u8p_replace_bits(&rxdma, burst_size, BIT_DMA_BURST_SIZE);

	rtw_write8(rtwdev, REG_RXDMA_MODE, rxdma);
	rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN);
}

static void rtw_usb_interface_cfg(struct rtw_dev *rtwdev)
{
	rtw_usb_init_burst_pkt_len(rtwdev);
}

static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
{
	u8 size, timeout;
	u16 val16;

	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
	rtw_write8_clr(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));

	if (enable) {
		size = 0x5;
		timeout = 0x20;
	} else {
		size = 0x0;
		timeout = 0x1;
	}
	val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
		u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);

	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
}

static void rtw_usb_dynamic_rx_agg_v2(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	u8 size, timeout;
	u16 val16;

	if (!enable) {
		size = 0x0;
		timeout = 0x1;
	} else if (rtwusb->udev->speed == USB_SPEED_SUPER) {
		size = 0x6;
		timeout = 0x1a;
	} else {
		size = 0x5;
		timeout = 0x20;
	}

	val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
		u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);

	rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
	rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
}

static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
	switch (rtwdev->chip->id) {
	case RTW_CHIP_TYPE_8822C:
	case RTW_CHIP_TYPE_8822B:
	case RTW_CHIP_TYPE_8821C:
		rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
		break;
	case RTW_CHIP_TYPE_8821A:
	case RTW_CHIP_TYPE_8812A:
		rtw_usb_dynamic_rx_agg_v2(rtwdev, enable);
		break;
	case RTW_CHIP_TYPE_8723D:
		/* Doesn't like aggregation. */
		break;
	case RTW_CHIP_TYPE_8703B:
		/* Likely not found in USB devices. */
		break;
	}
}

static struct rtw_hci_ops rtw_usb_ops = {
	.tx_write = rtw_usb_tx_write,
	.tx_kick_off = rtw_usb_tx_kick_off,
	.setup = rtw_usb_setup,
	.start = rtw_usb_start,
	.stop = rtw_usb_stop,
	.deep_ps = rtw_usb_deep_ps,
	.link_ps = rtw_usb_link_ps,
	.interface_cfg = rtw_usb_interface_cfg,
	.dynamic_rx_agg = rtw_usb_dynamic_rx_agg,

	.write8 = rtw_usb_write8,
	.write16 = rtw_usb_write16,
	.write32 = rtw_usb_write32,
	.read8 = rtw_usb_read8,
	.read16 = rtw_usb_read16,
	.read32 = rtw_usb_read32,

	.write_data_rsvd_page = rtw_usb_write_data_rsvd_page,
	.write_data_h2c = rtw_usb_write_data_h2c,
};

static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct sk_buff *rx_skb;
	int i;

	rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH, 0);
	if (!rtwusb->rxwq) {
		rtw_err(rtwdev, "failed to create RX work queue\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&rtwusb->rx_queue);
	skb_queue_head_init(&rtwusb->rx_free_queue);

	INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
	INIT_WORK(&rtwusb->rx_urb_work, rtw_usb_rx_resubmit_work);

	for (i = 0; i < RTW_USB_RX_SKB_NUM; i++) {
		rx_skb = alloc_skb(RTW_USB_MAX_RECVBUF_SZ, GFP_KERNEL);
		if (rx_skb)
			skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
	}

	return 0;
}

static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	int i;

	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
		struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];

		rtw_usb_rx_resubmit(rtwusb, rxcb, GFP_KERNEL);
	}
}

static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);

	skb_queue_purge(&rtwusb->rx_queue);

	flush_workqueue(rtwusb->rxwq);
	destroy_workqueue(rtwusb->rxwq);

	skb_queue_purge(&rtwusb->rx_free_queue);
}

static int rtw_usb_init_tx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	int i;

	rtwusb->txwq = create_singlethread_workqueue("rtw88_usb: tx wq");
	if (!rtwusb->txwq) {
		rtw_err(rtwdev, "failed to create TX work queue\n");
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
		skb_queue_head_init(&rtwusb->tx_queue[i]);

	INIT_WORK(&rtwusb->tx_work, rtw_usb_tx_handler);

	return 0;
}

static void rtw_usb_deinit_tx(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);

	flush_workqueue(rtwusb->txwq);
	destroy_workqueue(rtwusb->txwq);
	rtw_usb_tx_queue_purge(rtwusb);
}

static int rtw_usb_intf_init(struct rtw_dev *rtwdev,
			     struct usb_interface *intf)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	struct usb_device *udev = usb_get_dev(interface_to_usbdev(intf));
	int ret;

	rtwusb->udev = udev;
	ret = rtw_usb_parse(rtwdev, intf);
	if (ret)
		return ret;

	rtwusb->usb_data = kcalloc(RTW_USB_MAX_RXTX_COUNT, sizeof(u32),
				   GFP_KERNEL);
	if (!rtwusb->usb_data)
		return -ENOMEM;

	usb_set_intfdata(intf, rtwdev->hw);

	SET_IEEE80211_DEV(rtwdev->hw, &intf->dev);
	spin_lock_init(&rtwusb->usb_lock);

	return 0;
}

static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev,
				struct usb_interface *intf)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);

	usb_put_dev(rtwusb->udev);
	kfree(rtwusb->usb_data);
	usb_set_intfdata(intf, NULL);
}

static int rtw_usb_switch_mode_old(struct rtw_dev *rtwdev)
{
	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
	enum usb_device_speed cur_speed = rtwusb->udev->speed;
	u8 hci_opt;

	if (cur_speed == USB_SPEED_HIGH) {
		hci_opt = rtw_read8(rtwdev, REG_HCI_OPT_CTRL);

		if ((hci_opt & (BIT(2) | BIT(3))) != BIT(3)) {
			rtw_write8(rtwdev, REG_HCI_OPT_CTRL, 0x8);
			rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x2);
			rtw_write8(rtwdev, REG_ACLK_MON, 0x1);
			rtw_write8(rtwdev, 0x3d, 0x3);
			/* usb disconnect */
			rtw_write8(rtwdev, REG_SYS_PW_CTRL + 1, 0x80);
			return 1;
		}
	} else if (cur_speed == USB_SPEED_SUPER) {
		rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT(1));
		rtw_write8_clr(rtwdev, REG_ACLK_MON, BIT(0));
	}

	return 0;
}

static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
{
	enum usb_device_speed cur_speed;
	u8 id = rtwdev->chip->id;
	bool can_switch;
	u32 pad_ctrl2;

	if (rtw_read8(rtwdev, REG_SYS_CFG2 + 3) == 0x20)
		cur_speed = USB_SPEED_SUPER;
	else
		cur_speed = USB_SPEED_HIGH;

	if (cur_speed == USB_SPEED_SUPER)
		return 0;

	pad_ctrl2 = rtw_read32(rtwdev, REG_PAD_CTRL2);

	can_switch = !!(pad_ctrl2 & (BIT_MASK_USB23_SW_MODE_V1 |
				     BIT_USB3_USB2_TRANSITION));

	if (!can_switch) {
		rtw_dbg(rtwdev, RTW_DBG_USB,
			"Switching to USB 3 mode unsupported by the chip\n");
		return 0;
	}

	/* At this point cur_speed is USB_SPEED_HIGH. If we already tried
	 * to switch don't try again - it's a USB 2 port.
	 */
	if (u32_get_bits(pad_ctrl2, BIT_MASK_USB23_SW_MODE_V1) == BIT_USB_MODE_U3)
		return 0;

	/* Enable IO wrapper timeout */
	if (id == RTW_CHIP_TYPE_8822B || id == RTW_CHIP_TYPE_8821C)
		rtw_write8_clr(rtwdev, REG_SW_MDIO + 3, BIT(0));

	u32p_replace_bits(&pad_ctrl2, BIT_USB_MODE_U3, BIT_MASK_USB23_SW_MODE_V1);
	pad_ctrl2 |= BIT_RSM_EN_V1;

	rtw_write32(rtwdev, REG_PAD_CTRL2, pad_ctrl2);
	rtw_write8(rtwdev, REG_PAD_CTRL2 + 1, 4);

	rtw_write16_set(rtwdev, REG_SYS_PW_CTRL, BIT_APFM_OFFMAC);
	usleep_range(1000, 1001);
	rtw_write32_set(rtwdev, REG_PAD_CTRL2, BIT_NO_PDN_CHIPOFF_V1);

	return 1;
}

static bool rtw_usb3_chip_old(u8 chip_id)
{
	return chip_id == RTW_CHIP_TYPE_8812A;
}

static bool rtw_usb3_chip_new(u8 chip_id)
{
	return chip_id == RTW_CHIP_TYPE_8822C ||
	       chip_id == RTW_CHIP_TYPE_8822B;
}

static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
{
	u8 id = rtwdev->chip->id;

	if (!rtw_usb3_chip_new(id) && !rtw_usb3_chip_old(id))
		return 0;

	if (!rtwdev->efuse.usb_mode_switch) {
		rtw_dbg(rtwdev, RTW_DBG_USB,
			"Switching to USB 3 mode disabled by chip's efuse\n");
		return 0;
	}

	if (!rtw_switch_usb_mode) {
		rtw_dbg(rtwdev, RTW_DBG_USB,
			"Switching to USB 3 mode disabled by module parameter\n");
		return 0;
	}

	if (rtw_usb3_chip_old(id))
		return rtw_usb_switch_mode_old(rtwdev);
	else
		return rtw_usb_switch_mode_new(rtwdev);
}

#define USB_REG_PAGE	0xf4
#define USB_PHY_PAGE0	0x9b
#define USB_PHY_PAGE1	0xbb

static void rtw_usb_phy_write(struct rtw_dev *rtwdev, u8 addr, u16 data,
			      enum usb_device_speed speed)
{
	if (speed == USB_SPEED_SUPER) {
		rtw_write8(rtwdev, REG_USB3_PHY_DAT_L, data & 0xff);
		rtw_write8(rtwdev, REG_USB3_PHY_DAT_H, data >> 8);
		rtw_write8(rtwdev, REG_USB3_PHY_ADR, addr | BIT_USB3_PHY_ADR_WR);
	} else if (speed == USB_SPEED_HIGH) {
		rtw_write8(rtwdev, REG_USB2_PHY_DAT, data);
		rtw_write8(rtwdev, REG_USB2_PHY_ADR, addr);
		rtw_write8(rtwdev, REG_USB2_PHY_CMD, BIT_USB2_PHY_CMD_TRG);
	}
}

static void rtw_usb_page_switch(struct rtw_dev *rtwdev,
				enum usb_device_speed speed, u8 page)
{
	if (speed == USB_SPEED_SUPER)
		return;

	rtw_usb_phy_write(rtwdev, USB_REG_PAGE, page, speed);
}

static void rtw_usb_phy_cfg(struct rtw_dev *rtwdev,
			    enum usb_device_speed speed)
{
	const struct rtw_intf_phy_para *para = NULL;
	u16 offset;

	if (!rtwdev->chip->intf_table)
		return;

	if (speed == USB_SPEED_SUPER)
		para = rtwdev->chip->intf_table->usb3_para;
	else if (speed == USB_SPEED_HIGH)
		para = rtwdev->chip->intf_table->usb2_para;

	if (!para)
		return;

	for ( ; para->offset != 0xffff; para++) {
		if (!(para->cut_mask & BIT(rtwdev->hal.cut_version)))
			continue;

		offset = para->offset;

		if (para->ip_sel == RTW_IP_SEL_MAC) {
			rtw_write8(rtwdev, offset, para->value);
		} else {
			if (offset > 0x100)
				rtw_usb_page_switch(rtwdev, speed, USB_PHY_PAGE1);
			else
				rtw_usb_page_switch(rtwdev, speed, USB_PHY_PAGE0);

			offset &= 0xff;

			rtw_usb_phy_write(rtwdev, offset, para->value, speed);
		}
	}
}

int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
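	/* hw->priv is a struct rtw_dev with the HCI private data
	 * (struct rtw_usb) appended in the same allocation (drv_data_size
	 * below); rtw_get_usb_priv() returns that appended part via
	 * rtwdev->priv.
	 */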
	struct rtw_dev *rtwdev;
	struct ieee80211_hw *hw;
	struct rtw_usb *rtwusb;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_usb);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw)
		return -ENOMEM;

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &intf->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_info;
	rtwdev->hci.ops = &rtw_usb_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_USB;

	rtwusb = rtw_get_usb_priv(rtwdev);
	rtwusb->rtwdev = rtwdev;

	ret = rtw_usb_alloc_rx_bufs(rtwusb);
	if (ret)
		goto err_release_hw;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_free_rx_bufs;

	ret = rtw_usb_intf_init(rtwdev, intf);
	if (ret) {
		rtw_err(rtwdev, "failed to init USB interface\n");
		goto err_deinit_core;
	}

	ret = rtw_usb_init_tx(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init USB TX\n");
		goto err_destroy_usb;
	}

	ret = rtw_usb_init_rx(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init USB RX\n");
		goto err_destroy_txwq;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_rxwq;
	}

	rtw_usb_phy_cfg(rtwdev, USB_SPEED_HIGH);
	rtw_usb_phy_cfg(rtwdev, USB_SPEED_SUPER);

	ret = rtw_usb_switch_mode(rtwdev);
	if (ret) {
		/* Not a fail, but we do need to skip rtw_register_hw. */
		rtw_dbg(rtwdev, RTW_DBG_USB, "switching to USB 3 mode\n");
		ret = 0;
		goto err_destroy_rxwq;
	}

	ret = rtw_register_hw(rtwdev, rtwdev->hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_rxwq;
	}

	rtw_usb_setup_rx(rtwdev);

	return 0;

err_destroy_rxwq:
	rtw_usb_deinit_rx(rtwdev);

err_destroy_txwq:
	rtw_usb_deinit_tx(rtwdev);

err_destroy_usb:
	rtw_usb_intf_deinit(rtwdev, intf);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_free_rx_bufs:
	rtw_usb_free_rx_bufs(rtwusb);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_usb_probe);

void rtw_usb_disconnect(struct usb_interface *intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(intf);
	struct rtw_dev *rtwdev;
	struct rtw_usb *rtwusb;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwusb = rtw_get_usb_priv(rtwdev);

	rtw_usb_cancel_rx_bufs(rtwusb);

	rtw_unregister_hw(rtwdev, hw);
	rtw_usb_deinit_tx(rtwdev);
	rtw_usb_deinit_rx(rtwdev);

	if (rtwusb->udev->state != USB_STATE_NOTATTACHED)
		usb_reset_device(rtwusb->udev);

	rtw_usb_free_rx_bufs(rtwusb);

	rtw_usb_intf_deinit(rtwdev, intf);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_usb_disconnect);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek USB 802.11ac wireless driver");
MODULE_LICENSE("Dual BSD/GPL");