/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include "opt_inet.h"
#include <dev/mlx4/cq.h>
#include <linux/slab.h>
#include <dev/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <dev/mlx4/driver.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "en.h"


static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (ring->stride * index));
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	rx_desc->data[0].byte_count = cpu_to_be32(priv->rx_mb_size - MLX4_NET_IP_ALIGN);
	rx_desc->data[0].lkey = cpu_to_be32(priv->mdev->mr.key);

	/*
	 * If the number of used fragments does not fill up the ring
	 * stride, remaining (unused) fragments must be padded with
	 * null address/size and a special memory key:
	 */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = 1; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

/*
 * Allocate and DMA-map a replacement mbuf for an RX slot.  A pre-loaded
 * spare mbuf is kept around so that reception can continue even when a
 * fresh allocation or DMA load fails; in that case the spare is swapped in.
 */
static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
    __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
		if (unlikely(mb == NULL))
			return (-ENOMEM);
		/* setup correct length */
		mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

		/* make sure IP header gets aligned */
		m_adj(mb, MLX4_NET_IP_ALIGN);

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;
		ring->spare.paddr_be =
		    cpu_to_be64(segs[0].ds_addr);

		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* setup correct length */
	mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

	/* make sure IP header gets aligned */
	m_adj(mb, MLX4_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

	*pdma = cpu_to_be64(segs[0].ds_addr);
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
	*pdma = ring->spare.paddr_be;
	return (0);
}

static void
mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dmamap_t map = mb_list->dma_map;

	bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->dma_tag, map);
	m_freem(mb_list->mbuf);
	mb_list->mbuf = NULL;		/* safety clearing */
}

static int
mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (index * ring->stride));
	struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;

	mb_list->mbuf = NULL;

	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list)) {
		priv->port_stats.rx_alloc_failed++;
		return (-ENOMEM);
	}
	return (0);
}

static inline void
mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;
	int err;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			err = mlx4_en_prepare_rx_desc(priv, ring,
			    ring->actual_size);
			if (err) {
				if (ring->actual_size == 0) {
					en_err(priv, "Failed to allocate "
					    "enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size =
					    rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated, "
					    "reducing ring size to %d\n",
					    ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_buf(ring,
			    ring->mbuf + ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	    ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_buf(ring, ring->mbuf + index);
		++ring->cons;
	}
}

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN +
	    MLX4_NET_IP_ALIGN;

	if (eff_mtu > MJUM16BYTES) {
		en_err(priv, "MTU(%d) is too big\n", dev->if_mtu);
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUM9BYTES) {
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUMPAGESIZE) {
		eff_mtu = MJUM9BYTES;
	} else if (eff_mtu > MCLBYTES) {
		eff_mtu = MJUMPAGESIZE;
	} else {
		eff_mtu = MCLBYTES;
	}

	priv->rx_mb_size = eff_mtu;

	en_dbg(DRV, priv, "Effective RX MTU: %d bytes\n", eff_mtu);
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err;
	int tmp;
	uint32_t x;

	ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
	if (!ring) {
		en_err(priv, "Failed to allocate RX ring structure\n");
		return -ENOMEM;
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM16BYTES,		/* maxsize */
	    1,				/* nsegments */
	    MJUM16BYTES,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &ring->dma_tag))) {
		en_err(priv, "Failed to create DMA tag\n");
		goto err_ring;
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * sizeof(struct mlx4_en_rx_mbuf);

	ring->mbuf = kzalloc(tmp, GFP_KERNEL);
	if (ring->mbuf == NULL) {
		err = -ENOMEM;
		goto err_dma_tag;
	}

	err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);
	if (err != 0)
		goto err_info;

	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(ring->dma_tag,
				    ring->mbuf[x].dma_map);
			goto err_info;
		}
	}
	en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",
	    ring->mbuf, tmp);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
	    ring->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_dma_map;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;
	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++) {
		bus_dmamap_destroy(ring->dma_tag,
		    ring->mbuf[x].dma_map);
	}
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
err_info:
	vfree(ring->mbuf);
err_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
err_ring:
	kfree(ring);
	return (err);
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
		ring->rx_mb_size = priv->rx_mb_size;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

#ifdef INET
		/* Configure LRO manager */
		if (priv->dev->if_capenable & IFCAP_LRO) {
			if (tcp_lro_init(&ring->lro))
				priv->dev->if_capenable &= ~IFCAP_LRO;
			else
				ring->lro.ifp = priv->dev;
		}
#endif
	}

	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;

	while (ring_ind >= 0) {
		ring = priv->rx_ring[ring_ind];
		if (ring->stride <= TXBB_SIZE)
			ring->buf -= TXBB_SIZE;
		ring_ind--;
	}

	return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	uint32_t x;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
	/* free spare mbuf, if any */
	if (ring->spare.mbuf != NULL) {
		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
		m_freem(ring->spare.mbuf);
	}
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
	vfree(ring->mbuf);
	bus_dma_tag_destroy(ring->dma_tag);
#ifdef CONFIG_RFS_ACCEL
	/* release flow steering filters before the ring structure is freed */
	mlx4_en_cleanup_filters(priv, ring);
#endif
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
#ifdef INET
	tcp_lro_free(&ring->lro);
#endif
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}


static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
{
	int i;
	int offset = ETHER_HDR_LEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(mb->m_data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	m_freem(mb);
}


static inline int invalid_cqe(struct mlx4_en_priv *priv,
			      struct mlx4_cqe *cqe)
{
	/* Drop packet on bad receive or bad checksum */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
	    MLX4_CQE_OPCODE_ERROR)) {
		en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
		    ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
		    ((struct mlx4_err_cqe *)cqe)->syndrome);
		return 1;
	}
	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
		en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
		return 1;
	}

	return 0;
}

static struct mbuf *
mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
    struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list,
    int length)
{
	struct mbuf *mb;

	/* get mbuf */
	mb = mb_list->mbuf;

	/* collect used fragment while atomically replacing it */
	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list))
		return (NULL);

	/* range check hardware computed value */
	if (unlikely(length > mb->m_len))
		length = mb->m_len;

	/* update total packet length in packet header */
	mb->m_len = mb->m_pkthdr.len = length;
	return (mb);
}

/*
 * On CPU architectures with a 64-byte cache line, performance is better when
 * the CQE size is 64 bytes.  To enlarge the CQE size from 32 to 64 bytes, 32
 * bytes of padding (i.e. 0xcccccccc) are placed at the beginning of each CQE,
 * with the real data in the remaining 32 bytes.  The calculation below
 * ensures that when factor == 1 (64-byte CQEs) we skip the padding and read
 * the real CQE data.
 */
#define CQE_FACTOR_INDEX(index, factor) (((index) << (factor)) + (factor))

int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_mbuf *mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mbuf *mb;
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_cqe *buf = cq->buf;
	int index;
	unsigned int length;
	int polled = 0;
	u32 cons_index = mcq->cons_index;
	u32 size_mask = ring->size_mask;
	int size = cq->size;
	int factor = priv->cqe_factor;

	if (!priv->port_up)
		return 0;

	/*
	 * We assume a 1:1 mapping between CQEs and Rx descriptors, so the Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index'.
	 */
	index = cons_index & size_mask;
	cqe = &buf[CQE_FACTOR_INDEX(index, factor)];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
	    cons_index & size)) {
		mb_list = ring->mbuf + index;
		rx_desc = (struct mlx4_en_rx_desc *)
		    (ring->buf + (index << ring->log_stride));

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		if (invalid_cqe(priv, cqe)) {
			goto next;
		}
		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
		if (unlikely(!mb)) {
			ring->errors++;
			goto next;
		}

		ring->bytes += length;
		ring->packets++;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);
			goto next;
		}

		/* forward Toeplitz compatible hash value */
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->immed_rss_invalid);
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE_HASH);
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(dev->if_capenable &
		    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - TCP/IP (v4)
			 * - without IP options
			 * - not an IP fragment
			 */
#ifdef INET
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)
					goto next;
			}
#endif
			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);
		} else {
			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
		}

		/* Push it up the stack */
		dev->if_input(dev, mb);

next:
		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[CQE_FACTOR_INDEX(index, factor)];
		if (++polled == budget)
			goto out;
	}
	/* Flush all pending IP reassembly sessions */
out:
#ifdef INET
	tcp_lro_flush_all(&ring->lro);
#endif
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();	/* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = mcq->cons_index;
	ring->prod += polled;	/* polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}

/* Rx CQ polling - called from the interrupt handler and the RX taskqueue */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
{
	struct net_device *dev = cq->dev;
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);
	cq->tot_rx += done;

	return done;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	int done;

	/*
	 * Poll once directly in interrupt context; there is no NAPI in
	 * FreeBSD, so if a full budget was consumed the remaining work is
	 * deferred to the per-CQ taskqueue.
	 */
	done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
	if (priv->port_up && (done == MLX4_EN_RX_BUDGET)) {
		cq->curr_poll_rx_cpu_id = curcpu;
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	} else {
		mlx4_en_arm_cq(priv, cq);
	}
}

/*
 * Taskqueue handler for deferred RX polling: bind to the CPU that took the
 * interrupt, then keep polling while a full budget is consumed per pass.
 */
void mlx4_en_rx_que(void *context, int pending)
{
	struct mlx4_en_cq *cq;
	struct thread *td;

	cq = context;
	td = curthread;

	thread_lock(td);
	sched_bind(td, cq->curr_poll_rx_cpu_id);
	thread_unlock(td);

	while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET)
	    == MLX4_EN_RX_BUDGET)
		;
	mlx4_en_arm_cq(cq->dev->if_softc, cq);
}


/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		en_err(priv, "Failed to allocate qp context\n");
		return -ENOMEM;
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
	    qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
	    MLX4_RSS_TCP_IPV6);
	int i;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
	    0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
	    0x593D56D9, 0xF3253C06, 0x2ADC1FFC };

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
	    priv->rx_ring_num,
	    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, priv->rx_ring[i]->qpn,
		    priv->rx_ring[i],
		    &rss_map->state[i],
		    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
	    priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
	    MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
	    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}
	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
	    &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
		    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
		    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}