/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

/*
 * Available space in Tx Vring
 */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;
	int used = (vring->size + swhead - swtail) % vring->size;

	return vring->size - used - 1;
}

/**
 * wil_vring_wmark_low - low watermark for available descriptor space
 */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size/8;
}

/**
 * wil_vring_wmark_high - high watermark for available descriptor space
 */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size/4;
}

static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/*
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is guaranteed by dma_alloc_coherent()
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, the ownership bit is at the same location, thus
	 * we can use either
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &(vring->va[i].tx);
		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}

static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}

/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = RX_BUF_LEN;
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &(vring->va[i].rx);
	dma_addr_t pa;

	/* TODO align */
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}

/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));
			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);
				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ?
			ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}

/*
 * Fast in-place swap of two u16 values (XOR swap, no temporary)
 */
static void wil_swap_u16(u16 *a, u16 *b)
{
	*a ^= *b;
	*b ^= *a;
	*a ^= *b;
}

static void wil_swap_ethaddr(void *data)
{
	struct ethhdr *eth = data;
	u16 *s = (u16 *)eth->h_source;
	u16 *d = (u16 *)eth->h_dest;

	wil_swap_u16(s++, d++);
	wil_swap_u16(s++, d++);
	wil_swap_u16(s, d);
}

/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int sz = RX_BUF_LEN;
	u16 dmalen;
	u8 ftype;
	u8 ds_bits;
	int cid;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

	if (wil_vring_is_empty(vring))
		return NULL;

	_d = &(vring->va[vring->swhead].rx);
	if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
		/* not an error, we just reached the end of the Rx done area */
		return NULL;
	}

	skb = vring->ctx[vring->swhead].skb;
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);
	vring->ctx[vring->swhead].skb = NULL;
	wil_vring_advance_head(vring, 1);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(vring->swhead, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	if (dmalen > sz) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		kfree_skb(skb);
		return NULL;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;
	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	wil->stats.last_mcs_rx = stats->last_mcs_rx;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/*
	 * Non-data frames may be delivered through the Rx DMA channel
	 * (e.g. BAR). The driver should recognize them by the frame type
	 * found in the Rx descriptor.
	 * If the type is not data, the frame is an 802.11 frame as-is.
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (ftype != IEEE80211_FTYPE_DATA) {
		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
		/* TODO: process it */
		kfree_skb(skb);
		return NULL;
	}

	if (skb->len < ETH_HLEN) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		/* TODO: process it (i.e. BAR) */
		kfree_skb(skb);
		return NULL;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
		/* L4 protocol identified, csum calculated */
		if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let the IP stack re-check it
		 * For example, HW doesn't understand the Microsoft IP stack
		 * that mis-calculates the TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}

	ds_bits = wil_rxdesc_ds_bits(d);
	if (ds_bits == 1) {
		/*
		 * HW bug - in ToDS mode, i.e. Rx on AP side,
		 * addresses get swapped
		 */
		wil_swap_ethaddr(skb->data);
	}

	return skb;
}

/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (rc) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}
	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));

	return rc;
}

/*
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d);
	struct wil_net_stats *stats = &wil->sta[cid].stats;

	skb_orphan(skb);

	rc = napi_gro_receive(&wil->napi_rx, skb);

	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
	}
	{
		static const char * const gro_res_str[] = {
			[GRO_MERGED]		= "GRO_MERGED",
			[GRO_MERGED_FREE]	= "GRO_MERGED_FREE",
			[GRO_HELD]		= "GRO_HELD",
			[GRO_NORMAL]		= "GRO_NORMAL",
			[GRO_DROP]		= "GRO_DROP",
		};
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
}

/**
 * Process all completed skb's from the Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (!v->va) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			struct ethhdr *eth = (void *)skb->data;

			skb->protocol = eth_type_trans(skb, ndev);

			if (is_unicast_ether_addr(eth->h_dest))
				wil_rx_reorder(wil, skb);
			else
				wil_netif_rx_any(skb, ndev);
		}
	}
	wil_rx_refill(wil, v->size);
}

int wil_rx_init(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	vring->size = WIL6210_RX_RING_SIZE;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
 err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}

void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}

int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 16,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	memset(txdata, 0, sizeof(*txdata));
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;

	return 0;
 out_free:
	wil_vring_free(wil, vring, 1);
 out:

	return rc;
}

void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];

	WARN_ON(!mutex_is_locked(&wil->mutex));

	if (!vring->va)
		return;

	/* make sure NAPI won't touch this vring */
	wil->vring_tx_data[id].enabled = 0;
	if (test_bit(wil_status_napi_en, &wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
}

static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

	if (cid < 0)
		return NULL;

	if (!wil->sta[cid].data_port_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];

			wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
				     __func__, eth->h_dest, i);
			if (v->va) {
				return v;
			} else {
				wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
				return NULL;
			}
		}
	}

	return NULL;
}

static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];

	memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);

/*
 * Find the first vring eligible for data and return it; set the destination
 * address for this vring in the skb, then duplicate the skb and send a copy
 * to every other active vring.
 */
static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
				  struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		if (!v->va)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (!wil->sta[cid].data_port_open)
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (!wil->sta[cid].data_port_open)
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}

static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7: ip_version 0-IP6 1-IP4 */
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* use dst index 0 */
	d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
		       (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= ((nr_frags + 1) <<
			MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}

static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
					 struct vring_tx_desc *d,
					 struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
			(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
			(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;

	wil_dbg_txrx(wil, "%s()\n", __func__);

	if (avail < 1 + nr_frags) {
		wil_err(wil, "Tx ring full. No space for %d fragments\n",
			1 + nr_frags);
		return -ENOMEM;
	}
	_d = &(vring->va[i].tx);

	pa = dma_map_single(dev, skb->data,
			    skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb),
		     skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
	/* Process TCP/UDP checksum offloading */
	if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
		wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags);
	if (nr_frags)
		*_d = *d;

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		i = (swhead + f + 1) % vring->size;
		_d = &(vring->va[i].tx);
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto dma_error;
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_cksum_set(wil, d, skb);
		*_d = *d;
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	if (wil_vring_is_empty(vring)) /* performance monitoring */
		txdata->idle += get_cycles() - txdata->last_idle;

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));

	return 0;
 dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &(vring->va[i].tx);
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (!test_bit(wil_status_fwready, &wil->status)) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (!test_bit(wil_status_fwconnected, &wil->status)) {
		wil_err(wil, "FW not connected\n");
		goto drop;
	}
	if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (is_unicast_ether_addr(eth->h_dest)) {
		vring = wil_find_tx_vring(wil, skb);
	} else {
		vring = wil_tx_bcast(wil, skb);
	}
	if (!vring) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	/* do we still have enough room in the vring? */
	if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) {
		netif_tx_stop_all_queues(wil_to_ndev(wil));
		wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
	}

	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}

/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	volatile struct vring_tx_desc *_d;

	if (!vring->va) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (!txdata->enabled) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment.
		 * Look for it.
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (!(_d->dma.status & TX_DMA_STATUS_DU))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     vring->swtail, dmalen, d->dma.status,
				     d->dma.error);
			wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (d->dma.error == 0) {
					ndev->stats.tx_packets++;
					stats->tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					stats->tx_bytes += skb->len;
				} else {
					ndev->stats.tx_errors++;
					stats->tx_errors++;
				}

				dev_kfree_skb_any(skb);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* There is no need to touch the HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	if (wil_vring_is_empty(vring)) { /* performance monitoring */
		wil_dbg_txrx(wil, "Ring[%2d] empty\n", ringid);
		txdata->last_idle = get_cycles();
	}

	if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
		wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
	}

	return done;
}