/*
 * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

bool rx_align_2;
module_param(rx_align_2, bool, S_IRUGO);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}

static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

/* Used space in Tx Vring */
static inline int wil_vring_used_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;
	return (vring->size + swhead - swtail) % vring->size;
}

/* Available space in Tx Vring */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	return vring->size - wil_vring_used_tx(vring) - 1;
}

/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size/8;
}

/* wil_vring_wmark_high - high watermark for available descriptor space */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size/4;
}

/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}

static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "%s()\n", __func__);

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is guaranteed by dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, the ownership bit is at the same location,
	 * so we can use either
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}

static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}

/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}

/**
 * Adds radiotap header
 *
 * Any error is indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from an 8-byte boundary.
			 * There are 8-byte lines; the last line may be
			 * partially written (HW bug), thus FW configures the
			 * last line to be excessive. Driver skips this last
			 * line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}

/* similar to the ieee80211_ version, but FC contains only the first byte */
static inline int wil_is_back_req(u8 fc)
{
	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}

/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = mtu_max + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid;
	int i;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

again:
	if (unlikely(wil_vring_is_empty(vring)))
		return NULL;

	i = (int)vring->swhead;
	_d = &vring->va[i].rx;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not an error, we just reached the end of the Rx
		 * done area
		 */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_vring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		goto again;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		kfree_skb(skb);
		goto again;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
		stats->rx_per_mcs[stats->last_mcs_rx]++;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/* Non-data frames may be delivered through the Rx DMA channel
	 * (e.g. BAR). The driver should recognize them by frame type,
	 * which is found in the Rx descriptor. If the type is not data,
	 * it is an 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		u8 fc1 = wil_rxdesc_fc1(d);
		int mid = wil_rxdesc_mid(d);
		int tid = wil_rxdesc_tid(d);
		u16 seq = wil_rxdesc_seq(d);

		wil_dbg_txrx(wil,
			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		stats->rx_non_data_frame++;
		if (wil_is_back_req(fc1)) {
			wil_dbg_txrx(wil,
				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
				     mid, cid, tid, seq);
			wil_rx_bar(wil, cid, tid, seq);
		} else {
			/* print again all info. One can enable only this
			 * without overhead for printing every Rx frame
			 */
			wil_dbg_txrx(wil,
				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
				     fc1, mid, cid, tid, seq);
			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);
			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
					  skb->data, skb_headlen(skb), false);
		}
		kfree_skb(skb);
		goto again;
	}

	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		kfree_skb(skb);
		goto again;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet;
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let the IP stack re-check it
		 * For example, HW doesn't understand the Microsoft IP stack,
		 * which mis-calculates the TCP checksum - if it should be
		 * 0x0, it writes 0xffff in violation of RFC 1624
		 */
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}

/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
	     (next_tail != v->swhead) && (count-- > 0);
	     v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}
	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}

/*
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = wil_to_wdev(wil);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED] = "GRO_MERGED",
		[GRO_MERGED_FREE] = "GRO_MERGED_FREE",
		[GRO_HELD] = "GRO_HELD",
		[GRO_NORMAL] = "GRO_NORMAL",
		[GRO_DROP] = "GRO_DROP",
	};

	if (ndev->features & NETIF_F_RXHASH)
		/* fake L4 to ensure it won't be re-calculated later
		 * set hash to any non-zero value to activate rps
		 * mechanism, core will be chosen according
		 * to user-level rps configuration.
		 */
		skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);

	skb_orphan(skb);

	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}

/**
 * Process all completed skb's from the Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}

int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}

void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}

static inline void wil_tx_data_init(struct vring_tx_data *txdata)
{
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = 0;
	txdata->enabled = 0;
	txdata->idle = 0;
	txdata->last_idle = 0;
	txdata->begin = 0;
	txdata->agg_wsize = 0;
	txdata->agg_timeout = 0;
	txdata->agg_amsdu = 0;
	txdata->addba_in_progress = false;
	spin_unlock_bh(&txdata->lock);
}

int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;
	if (txdata->dot1x_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);
	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
	wil->vring2cid_tid[id][1] = 0;

out:

	return rc;
}

int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
{
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
	wil->vring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);
out:

	return rc;
}

void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	lockdep_assert_held(&wil->mutex);

	if (!vring->va)
		return;

	wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);

	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0; /* no Tx can be in progress or start anew */
	spin_unlock_bh(&txdata->lock);
	/* make sure NAPI won't touch this vring */
	if (test_bit(wil_status_napi_en, wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
}

static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

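	/* negative cid: destination MAC is not a currently associated station */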
	if (cid < 0)
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];
			struct vring_tx_data *txdata = &wil->vring_tx_data[i];

			wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
				     __func__, eth->h_dest, i);
			if (v->va && txdata->enabled) {
				return v;
			} else {
				wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
				return NULL;
			}
		}
	}

	return NULL;
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);

static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	struct vring *v;
	int i;
	u8 cid;
	struct vring_tx_data *txdata;

	/* In STA mode, it is expected to have only one VRING
	 * for the AP we are connected to.
	 * Find the first vring eligible for this skb and use it.
	 */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;

		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return v;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;
}

/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find 1-st vring and return it;
 *    duplicate skb and send it to other active vrings;
 *    in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v;
	struct vring_tx_data *txdata;
	int i = wil->bcast_vring;

	if (i < 0)
		return NULL;
	v = &wil->vring_tx[i];
	txdata = &wil->vring_tx_data[i];
	if (!v->va || !txdata->enabled)
		return NULL;
	if (!wil->vring_tx_data[i].dot1x_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	return v;
}

static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];

	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}

static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;
	struct vring_tx_data *txdata;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}

static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;

	if (wdev->iftype != NL80211_IFTYPE_AP)
		return wil_find_tx_bcast_2(wil, skb);

	return wil_find_tx_bcast_1(wil, skb);
}

static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}

/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */

static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		     (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}

/**
 * Sets the descriptor @d up for csum. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns 0 on success or -EINVAL if the protocol is not TCP or UDP
 * over IPv4/IPv6.
 *
 * It is very similar to the previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */

static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
			(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
			(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}

static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}

static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
	d->dma.d0 |= wil_tso_type_lst <<
		     DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}

static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
			      struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);

	/* point to descriptors in shared memory */
	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
				      *_first_desc = NULL;

	/* pointers to shadow descriptors */
	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
			     *first_desc = &first_desc_mem;

	/* pointer to shadow descriptors' context */
	struct wil_ctx *hdr_ctx, *first_ctx = NULL;

	int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss */

	u32 swhead = vring->swhead;
	int used, avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 1;
	int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
	int f, len, hdrlen, headlen;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	const skb_frag_t *frag = NULL;
	int rem_data = mss;
	int lenmss;
	int hdr_compensation_need = true;
	int desc_tso_type = wil_tso_type_first;
	bool is_ipv4;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int gso_type;
	int rc = -EINVAL;

	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
		     __func__, skb->len, vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* A typical 4K page holds 3-4 payloads; we assume each fragment
	 * is a full payload, that's how min_desc_required has been
	 * calculated. In reality we might need more or fewer descriptors,
	 * this is the initial check only.
	 */
	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, min_desc_required);
		return -ENOMEM;
	}

	/* Header Length = MAC header len + IP header len + TCP header len */
	hdrlen = ETH_HLEN +
		 (int)skb_network_header_len(skb) +
		 tcp_hdrlen(skb);

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		/* TCP v4, zero out the IP length and IPv4 checksum fields
		 * as required by the offloading doc
		 */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		/* TCP v6, zero out the payload length */
		ipv6_hdr(skb)->payload_len = 0;
		is_ipv4 = false;
		break;
	default:
		/* other than TCPv4 or TCPv6 types are not supported for TSO.
		 * It is also illegal for both to be set simultaneously
		 */
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	_hdr_desc = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb head DMA map error\n");
		goto err_exit;
	}

	wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
				      tcp_hdr_len, skb_net_hdr_len);
	wil_tx_last_desc(hdr_desc);

	vring->ctx[i].mapped_as = wil_mapped_as_single;
	hdr_ctx = &vring->ctx[i];

	descs_used++;
	headlen = skb_headlen(skb) - hdrlen;

	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
		if (headlen) {
			len = headlen;
			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
				     len);
		} else {
			frag = &skb_shinfo(skb)->frags[f];
			len = frag->size;
			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
		}

		while (len) {
			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d\n",
				     len, rem_data, descs_used);

			if (descs_used == avail) {
				wil_err_ratelimited(wil, "TSO: ring overflow\n");
				rc = -ENOMEM;
				goto mem_error;
			}

			lenmss = min_t(int, rem_data, len);
			i = (swhead + descs_used) % vring->size;
			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

			if (!headlen) {
				pa = skb_frag_dma_map(dev, frag,
						      frag->size - len, lenmss,
						      DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_page;
			} else {
				pa = dma_map_single(dev,
						    skb->data +
						    skb_headlen(skb) - headlen,
						    lenmss,
						    DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_single;
				headlen -= lenmss;
			}

			if (unlikely(dma_mapping_error(dev, pa))) {
				wil_err(wil, "TSO: DMA map page error\n");
				goto mem_error;
			}

			_desc = &vring->va[i].tx;

			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil_tx_desc_map(d, pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++;  /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */
			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* Close the segment if reached mss size or last frag */
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment include hdr desc for
					 * release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt +
								 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/* last descriptor will be copied at the end
				 * of this TSO processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptor */
				*_desc = *d;
		}
	}

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr) */
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_vring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx;
		_desc = &vring->va[i].tx;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}

static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
		     __func__, skb->len, vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				vring_index);
			goto dma_error;
		}
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;
dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;

	spin_lock(&txdata->lock);

	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
	     (wil, vring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_err_ratelimited(wil, "FW not connected\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
		/* in STA mode (ESS), all to same VRING */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else { /* direct communication, find matching VRING */
		vring = bcast ? wil_find_tx_bcast(wil, skb) :
				wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	/* do we still have enough room in the vring? */
	if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
		netif_tx_stop_all_queues(wil_to_ndev(wil));
		wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
	}

	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}

static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}

static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}

/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment. Look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
		wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
	}

	return done;
}