/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include "opt_inet.h"
#include <dev/mlx4/cq.h>
#include <linux/slab.h>
#include <dev/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <dev/mlx4/driver.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "en.h"


static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring,
    int index)
{
    struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
        (ring->buf + (ring->stride * index));
    int possible_frags;
    int i;

    /* Set size and memtype fields */
    rx_desc->data[0].byte_count = cpu_to_be32(priv->rx_mb_size - MLX4_NET_IP_ALIGN);
    rx_desc->data[0].lkey = cpu_to_be32(priv->mdev->mr.key);

    /*
     * If the number of used fragments does not fill up the ring
     * stride, remaining (unused) fragments must be padded with
     * null address/size and a special memory key:
     */
    possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
    for (i = 1; i < possible_frags; i++) {
        rx_desc->data[i].byte_count = 0;
        rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
        rx_desc->data[i].addr = 0;
    }
}

static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
    __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *mb;
    int nsegs;
    int err;

    /* try to allocate a new spare mbuf */
    if (unlikely(ring->spare.mbuf == NULL)) {
        mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
        if (unlikely(mb == NULL))
            return (-ENOMEM);
        /* setup correct length */
        mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

        /* make sure IP header gets aligned */
        m_adj(mb, MLX4_NET_IP_ALIGN);

        /* load spare mbuf into BUSDMA */
        err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
            mb, segs, &nsegs, BUS_DMA_NOWAIT);
        if (unlikely(err != 0)) {
            m_freem(mb);
            return (err);
        }

        /* store spare info */
        ring->spare.mbuf = mb;
        ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);

        bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
            BUS_DMASYNC_PREREAD);
    }

    /* synchronize and unload the current mbuf, if any */
    if (likely(mb_list->mbuf != NULL)) {
        bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
    }

    mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
    if (unlikely(mb == NULL))
        goto use_spare;

    /* setup correct length */
    mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

    /* make sure IP header gets aligned */
    m_adj(mb, MLX4_NET_IP_ALIGN);

    err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
        mb, segs, &nsegs, BUS_DMA_NOWAIT);
    if (unlikely(err != 0)) {
        m_freem(mb);
        goto use_spare;
    }

    *pdma = cpu_to_be64(segs[0].ds_addr);
    mb_list->mbuf = mb;

    bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
    return (0);

use_spare:
    /* swap DMA maps */
    map = mb_list->dma_map;
    mb_list->dma_map = ring->spare.dma_map;
    ring->spare.dma_map = map;

    /* swap MBUFs */
    mb_list->mbuf = ring->spare.mbuf;
    ring->spare.mbuf = NULL;

    /* store physical address */
    *pdma = ring->spare.paddr_be;
    return (0);
}

static void
mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
{
    bus_dmamap_t map = mb_list->dma_map;
    bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
    bus_dmamap_unload(ring->dma_tag, map);
    m_freem(mb_list->mbuf);
    mb_list->mbuf = NULL;    /* safety clearing */
}

static int
mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
    struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
        (ring->buf + (index * ring->stride));
    struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;

    mb_list->mbuf = NULL;

    if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list)) {
        priv->port_stats.rx_alloc_failed++;
        return (-ENOMEM);
    }
    return (0);
}

static inline void
mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
    *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
    struct mlx4_en_rx_ring *ring;
    int ring_ind;
    int buf_ind;
    int new_size;
    int err;

    for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
            ring = priv->rx_ring[ring_ind];

            err = mlx4_en_prepare_rx_desc(priv, ring,
                ring->actual_size);
            if (err) {
                if (ring->actual_size == 0) {
                    en_err(priv, "Failed to allocate "
                        "enough rx buffers\n");
                    return -ENOMEM;
                } else {
                    new_size =
                        rounddown_pow_of_two(ring->actual_size);
                    en_warn(priv, "Only %d buffers allocated "
                        "reducing ring size to %d\n",
                        ring->actual_size, new_size);
                    goto reduce_rings;
                }
            }
            ring->actual_size++;
            ring->prod++;
        }
    }
    return 0;

reduce_rings:
    for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
        ring = priv->rx_ring[ring_ind];
        while (ring->actual_size > new_size) {
            ring->actual_size--;
            ring->prod--;
            mlx4_en_free_buf(ring,
                ring->mbuf + ring->actual_size);
        }
    }

    return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring)
{
    int index;

    en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
        ring->cons, ring->prod);

    /* Unmap and free Rx buffers */
    BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
    while (ring->cons != ring->prod) {
        index = ring->cons & ring->size_mask;
        en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
        mlx4_en_free_buf(ring, ring->mbuf + index);
        ++ring->cons;
    }
}

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    int eff_mtu = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN +
        MLX4_NET_IP_ALIGN;

    if (eff_mtu > MJUM16BYTES) {
        en_err(priv, "MTU(%d) is too big\n", dev->if_mtu);
        eff_mtu = MJUM16BYTES;
    } else if (eff_mtu > MJUM9BYTES) {
        eff_mtu = MJUM16BYTES;
    } else if (eff_mtu > MJUMPAGESIZE) {
        eff_mtu = MJUM9BYTES;
    } else if (eff_mtu > MCLBYTES) {
        eff_mtu = MJUMPAGESIZE;
    } else {
        eff_mtu = MCLBYTES;
    }

    priv->rx_mb_size = eff_mtu;

    en_dbg(DRV, priv, "Effective RX MTU: %d bytes\n", eff_mtu);
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring **pring,
    u32 size, int node)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_rx_ring *ring;
    int err;
    int tmp;
    uint32_t x;

    ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
    if (!ring) {
        en_err(priv, "Failed to allocate RX ring structure\n");
        return -ENOMEM;
    }

    /* Create DMA descriptor TAG */
    if ((err = -bus_dma_tag_create(
        bus_get_dma_tag(mdev->pdev->dev.bsddev),
        1,                              /* any alignment */
        0,                              /* no boundary */
        BUS_SPACE_MAXADDR,              /* lowaddr */
        BUS_SPACE_MAXADDR,              /* highaddr */
        NULL, NULL,                     /* filter, filterarg */
        MJUM16BYTES,                    /* maxsize */
        1,                              /* nsegments */
        MJUM16BYTES,                    /* maxsegsize */
        0,                              /* flags */
        NULL, NULL,                     /* lockfunc, lockfuncarg */
        &ring->dma_tag))) {
        en_err(priv, "Failed to create DMA tag\n");
        goto err_ring;
    }

    ring->prod = 0;
    ring->cons = 0;
    ring->size = size;
    ring->size_mask = size - 1;
    ring->stride = roundup_pow_of_two(
        sizeof(struct mlx4_en_rx_desc) + DS_SIZE);
    ring->log_stride = ffs(ring->stride) - 1;
    ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

    tmp = size * sizeof(struct mlx4_en_rx_mbuf);

    ring->mbuf = kzalloc(tmp, GFP_KERNEL);
    if (ring->mbuf == NULL) {
        err = -ENOMEM;
        goto err_dma_tag;
    }

    err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);
    if (err != 0)
        goto err_info;

    for (x = 0; x != size; x++) {
        err = -bus_dmamap_create(ring->dma_tag, 0,
            &ring->mbuf[x].dma_map);
        if (err != 0) {
            while (x--)
                bus_dmamap_destroy(ring->dma_tag,
                    ring->mbuf[x].dma_map);
            goto err_info;
        }
    }
    en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",
        ring->mbuf, tmp);

    err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
        ring->buf_size, 2 * PAGE_SIZE);
    if (err)
        goto err_dma_map;

    err = mlx4_en_map_buffer(&ring->wqres.buf);
    if (err) {
        en_err(priv, "Failed to map RX buffer\n");
        goto err_hwq;
    }
    ring->buf = ring->wqres.buf.direct.buf;
    *pring = ring;
    return 0;

err_hwq:
    mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
    for (x = 0; x != size; x++) {
        bus_dmamap_destroy(ring->dma_tag,
            ring->mbuf[x].dma_map);
    }
    bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
err_info:
    vfree(ring->mbuf);
err_dma_tag:
    bus_dma_tag_destroy(ring->dma_tag);
err_ring:
    kfree(ring);
    return (err);
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
    struct mlx4_en_rx_ring *ring;
    int i;
    int ring_ind;
    int err;
    int stride = roundup_pow_of_two(
        sizeof(struct mlx4_en_rx_desc) + DS_SIZE);

    for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
        ring = priv->rx_ring[ring_ind];

        ring->prod = 0;
        ring->cons = 0;
        ring->actual_size = 0;
        ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
        ring->rx_mb_size = priv->rx_mb_size;

        ring->stride = stride;
        if (ring->stride <= TXBB_SIZE) {
            /* Stamp first unused send wqe */
            __be32 *ptr = (__be32 *)ring->buf;
            __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
            *ptr = stamp;
            /* Move pointer to start of rx section */
            ring->buf += TXBB_SIZE;
        }

        ring->log_stride = ffs(ring->stride) - 1;
        ring->buf_size = ring->size * ring->stride;

        memset(ring->buf, 0, ring->buf_size);
        mlx4_en_update_rx_prod_db(ring);

        /* Initialize all descriptors */
        for (i = 0; i < ring->size; i++)
            mlx4_en_init_rx_desc(priv, ring, i);

#ifdef INET
        /* Configure lro mngr */
        if (priv->dev->if_capenable & IFCAP_LRO) {
            if (tcp_lro_init(&ring->lro))
                priv->dev->if_capenable &= ~IFCAP_LRO;
            else
                ring->lro.ifp = priv->dev;
        }
#endif
    }

    err = mlx4_en_fill_rx_buffers(priv);
    if (err)
        goto err_buffers;

    for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
        ring = priv->rx_ring[ring_ind];

        ring->size_mask = ring->actual_size - 1;
        mlx4_en_update_rx_prod_db(ring);
    }

    return 0;

err_buffers:
    for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
        mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

    ring_ind = priv->rx_ring_num - 1;

    while (ring_ind >= 0) {
        ring = priv->rx_ring[ring_ind];
        if (ring->stride <= TXBB_SIZE)
            ring->buf -= TXBB_SIZE;
        ring_ind--;
    }

    return err;
}


void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring **pring,
    u32 size, u16 stride)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_rx_ring *ring = *pring;
    uint32_t x;

    mlx4_en_unmap_buffer(&ring->wqres.buf);
    mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
    for (x = 0; x != size; x++)
        bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
    /* free spare mbuf, if any */
    if (ring->spare.mbuf != NULL) {
        bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
        m_freem(ring->spare.mbuf);
    }
    bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
    vfree(ring->mbuf);
    bus_dma_tag_destroy(ring->dma_tag);
    kfree(ring);
    *pring = NULL;
#ifdef CONFIG_RFS_ACCEL
    mlx4_en_cleanup_filters(priv, ring);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring)
{
#ifdef INET
    tcp_lro_free(&ring->lro);
#endif
    mlx4_en_free_rx_buf(priv, ring);
    if (ring->stride <= TXBB_SIZE)
        ring->buf -= TXBB_SIZE;
}


static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
{
    int i;
    int offset = ETHER_HDR_LEN;

    for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
        if (*(mb->m_data + offset) != (unsigned char) (i & 0xff))
            goto out_loopback;
    }
    /* Loopback found */
    priv->loopback_ok = 1;

out_loopback:
    m_freem(mb);
}


static inline int invalid_cqe(struct mlx4_en_priv *priv,
    struct mlx4_cqe *cqe)
{
    /* Drop packet on bad receive or bad checksum */
    if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
        MLX4_CQE_OPCODE_ERROR)) {
        en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
            ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
            ((struct mlx4_err_cqe *)cqe)->syndrome);
        return 1;
    }
    if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
        en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
        return 1;
    }

    return 0;
}

static struct mbuf *
mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
    struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list,
    int length)
{
    struct mbuf *mb;

    /* get mbuf */
    mb = mb_list->mbuf;

    /* collect used fragment while atomically replacing it */
    if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list))
        return (NULL);

    /* range check hardware computed value */
    if (unlikely(length > mb->m_len))
        length = mb->m_len;

    /* update total packet length in packet header */
    mb->m_len = mb->m_pkthdr.len = length;
    return (mb);
}

static __inline int
mlx4_en_rss_hash(__be16 status, int udp_rss)
{
    enum {
        status_all = cpu_to_be16(
            MLX4_CQE_STATUS_IPV4 |
            MLX4_CQE_STATUS_IPV4F |
            MLX4_CQE_STATUS_IPV6 |
            MLX4_CQE_STATUS_TCP |
            MLX4_CQE_STATUS_UDP),
        status_ipv4_tcp = cpu_to_be16(
            MLX4_CQE_STATUS_IPV4 |
            MLX4_CQE_STATUS_TCP),
        status_ipv6_tcp = cpu_to_be16(
            MLX4_CQE_STATUS_IPV6 |
            MLX4_CQE_STATUS_TCP),
        status_ipv4_udp = cpu_to_be16(
            MLX4_CQE_STATUS_IPV4 |
            MLX4_CQE_STATUS_UDP),
        status_ipv6_udp = cpu_to_be16(
            MLX4_CQE_STATUS_IPV6 |
            MLX4_CQE_STATUS_UDP),
        status_ipv4 = cpu_to_be16(MLX4_CQE_STATUS_IPV4),
        status_ipv6 = cpu_to_be16(MLX4_CQE_STATUS_IPV6)
    };

    status &= status_all;
    switch (status) {
    case status_ipv4_tcp:
        return (M_HASHTYPE_RSS_TCP_IPV4);
    case status_ipv6_tcp:
        return (M_HASHTYPE_RSS_TCP_IPV6);
    case status_ipv4_udp:
        return (udp_rss ? M_HASHTYPE_RSS_UDP_IPV4
            : M_HASHTYPE_RSS_IPV4);
    case status_ipv6_udp:
        return (udp_rss ? M_HASHTYPE_RSS_UDP_IPV6
            : M_HASHTYPE_RSS_IPV6);
    default:
        if (status & status_ipv4)
            return (M_HASHTYPE_RSS_IPV4);
        if (status & status_ipv6)
            return (M_HASHTYPE_RSS_IPV6);
        return (M_HASHTYPE_OPAQUE_HASH);
    }
}

/* For cpu arch with cache line of 64B the performance is better when cqe size==64B
 * To enlarge cqe size from 32B to 64B --> 32B of garbage (i.e. 0xccccccc)
 * was added in the beginning of each cqe (the real data is in the corresponding 32B).
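 * (So with a 64B CQE stride the valid data of CQE number n sits in the second
 * 32B half of its 64B slot: CQE_FACTOR_INDEX() below yields (n << 1) + 1 when
 * factor==1, and simply n when factor==0.)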
 * The following calc ensures that when factor==1, it means we are aligned to 64B
 * and we get the real cqe data*/
#define CQE_FACTOR_INDEX(index, factor) ((index << factor) + factor)
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
    struct mlx4_en_priv *priv = netdev_priv(dev);
    struct mlx4_cqe *cqe;
    struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
    struct mlx4_en_rx_mbuf *mb_list;
    struct mlx4_en_rx_desc *rx_desc;
    struct mbuf *mb;
    struct mlx4_cq *mcq = &cq->mcq;
    struct mlx4_cqe *buf = cq->buf;
    int index;
    unsigned int length;
    int polled = 0;
    u32 cons_index = mcq->cons_index;
    u32 size_mask = ring->size_mask;
    int size = cq->size;
    int factor = priv->cqe_factor;
    const int udp_rss = priv->mdev->profile.udp_rss;

    if (!priv->port_up)
        return 0;

    /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
     * descriptor offset can be deduced from the CQE index instead of
     * reading 'cqe->index' */
    index = cons_index & size_mask;
    cqe = &buf[CQE_FACTOR_INDEX(index, factor)];

    /* Process all completed CQEs */
    while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
        cons_index & size)) {
        mb_list = ring->mbuf + index;
        rx_desc = (struct mlx4_en_rx_desc *)
            (ring->buf + (index << ring->log_stride));

        /*
         * make sure we read the CQE after we read the ownership bit
         */
        rmb();

        if (invalid_cqe(priv, cqe)) {
            goto next;
        }
        /*
         * Packet is OK - process it.
         */
        length = be32_to_cpu(cqe->byte_cnt);
        length -= ring->fcs_del;

        mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
        if (unlikely(!mb)) {
            ring->errors++;
            goto next;
        }

        ring->bytes += length;
        ring->packets++;

        if (unlikely(priv->validate_loopback)) {
            validate_loopback(priv, mb);
            goto next;
        }

        /* forward Toeplitz compatible hash value */
        mb->m_pkthdr.flowid = be32_to_cpu(cqe->immed_rss_invalid);
        M_HASHTYPE_SET(mb, mlx4_en_rss_hash(cqe->status, udp_rss));
        mb->m_pkthdr.rcvif = dev;
        if (be32_to_cpu(cqe->vlan_my_qpn) &
            MLX4_CQE_VLAN_PRESENT_MASK) {
            mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
            mb->m_flags |= M_VLANTAG;
        }
        if (likely(dev->if_capenable &
            (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
            (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
            (cqe->checksum == cpu_to_be16(0xffff))) {
            priv->port_stats.rx_chksum_good++;
            mb->m_pkthdr.csum_flags =
                CSUM_IP_CHECKED | CSUM_IP_VALID |
                CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            mb->m_pkthdr.csum_data = htons(0xffff);
            /* This packet is eligible for LRO if it is:
             * - DIX Ethernet (type interpretation)
             * - TCP/IP (v4)
             * - without IP options
             * - not an IP fragment
             */
#ifdef INET
            if (mlx4_en_can_lro(cqe->status) &&
                (dev->if_capenable & IFCAP_LRO)) {
                if (ring->lro.lro_cnt != 0 &&
                    tcp_lro_rx(&ring->lro, mb, 0) == 0)
                    goto next;
            }

#endif
            /* LRO not possible, complete processing here */
            INC_PERF_COUNTER(priv->pstats.lro_misses);
        } else {
            mb->m_pkthdr.csum_flags = 0;
            priv->port_stats.rx_chksum_none++;
        }

        /* Push it up the stack */
        dev->if_input(dev, mb);

next:
        ++cons_index;
        index = cons_index & size_mask;
        cqe = &buf[CQE_FACTOR_INDEX(index, factor)];
        if (++polled == budget)
            goto out;
    }
    /* Flush all pending IP reassembly sessions */
out:
#ifdef INET
    tcp_lro_flush_all(&ring->lro);
#endif
    AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
    mcq->cons_index = cons_index;
    mlx4_cq_set_ci(mcq);
    wmb(); /* ensure HW sees CQ consumer before we post new buffers */
    ring->cons = mcq->cons_index;
    ring->prod += polled; /* Polled descriptors were reallocated in place */
    mlx4_en_update_rx_prod_db(ring);
    return polled;
}

/* Rx CQ polling - called from the RX interrupt handler and the RX taskqueue */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
{
    struct net_device *dev = cq->dev;
    int done;

    done = mlx4_en_process_rx_cq(dev, cq, budget);
    cq->tot_rx += done;

    return done;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
    struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
    struct mlx4_en_priv *priv = netdev_priv(cq->dev);
    int done;

    /*
     * Process one polling budget within the irq context,
     * because there is no NAPI in FreeBSD.
     */
    done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
    if (priv->port_up && (done == MLX4_EN_RX_BUDGET)) {
        cq->curr_poll_rx_cpu_id = curcpu;
        taskqueue_enqueue(cq->tq, &cq->cq_task);
    } else {
        mlx4_en_arm_cq(priv, cq);
    }
}

void mlx4_en_rx_que(void *context, int pending)
{
    struct mlx4_en_cq *cq;
    struct thread *td;

    cq = context;
    td = curthread;

    thread_lock(td);
    sched_bind(td, cq->curr_poll_rx_cpu_id);
    thread_unlock(td);

    while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET)
        == MLX4_EN_RX_BUDGET);
    mlx4_en_arm_cq(cq->dev->if_softc, cq);
}


/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
    struct mlx4_en_rx_ring *ring,
    enum mlx4_qp_state *state,
    struct mlx4_qp *qp)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_qp_context *context;
    int err = 0;

    context = kmalloc(sizeof *context, GFP_KERNEL);
    if (!context) {
        en_err(priv, "Failed to allocate qp context\n");
        return -ENOMEM;
    }

    err = mlx4_qp_alloc(mdev->dev, qpn, qp);
    if (err) {
        en_err(priv, "Failed to allocate qp #%x\n", qpn);
        goto out;
    }
    qp->event = mlx4_en_sqp_event;

    memset(context, 0, sizeof *context);
    mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
        qpn, ring->cqn, -1, context);
    context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

    /* Cancel FCS removal if FW allows */
    if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
        context->param3 |= cpu_to_be32(1 << 29);
        ring->fcs_del = ETH_FCS_LEN;
    } else
        ring->fcs_del = 0;

    err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
    if (err) {
        mlx4_qp_remove(mdev->dev, qp);
        mlx4_qp_free(mdev->dev, qp);
    }
    mlx4_en_update_rx_prod_db(ring);
out:
    kfree(context);
    return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
    int err;
    u32 qpn;

    err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
    if (err) {
        en_err(priv, "Failed reserving drop qpn\n");
        return err;
    }
    err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
    if (err) {
        en_err(priv, "Failed allocating drop qp\n");
        mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
        return err;
    }

    return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
    u32 qpn;

    qpn = priv->drop_qp.qpn;
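    /*
     * The QP number is saved above because it is still needed to release
     * the reserved range after the QP itself has been removed and freed.
     */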
    mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
    mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
    mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

const u32 *
mlx4_en_get_rss_key(struct mlx4_en_priv *priv __unused,
    u16 *keylen)
{
    static const u32 rsskey[10] = {
        cpu_to_be32(0xD181C62C),
        cpu_to_be32(0xF7F4DB5B),
        cpu_to_be32(0x1983A2FC),
        cpu_to_be32(0x943E1ADB),
        cpu_to_be32(0xD9389E6B),
        cpu_to_be32(0xD1039C2C),
        cpu_to_be32(0xA74499AD),
        cpu_to_be32(0x593D56D9),
        cpu_to_be32(0xF3253C06),
        cpu_to_be32(0x2ADC1FFC)
    };

    if (keylen != NULL)
        *keylen = sizeof(rsskey);
    return (rsskey);
}

u8 mlx4_en_get_rss_mask(struct mlx4_en_priv *priv)
{
    u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
        MLX4_RSS_TCP_IPV6);

    if (priv->mdev->profile.udp_rss)
        rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
    return (rss_mask);
}

/* Allocate RX QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_rss_map *rss_map = &priv->rss_map;
    struct mlx4_qp_context context;
    struct mlx4_rss_context *rss_context;
    const u32 *key;
    int rss_rings;
    void *ptr;
    int i;
    int err = 0;
    int good_qps = 0;

    en_dbg(DRV, priv, "Configuring rss steering\n");
    err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
        priv->rx_ring_num,
        &rss_map->base_qpn, 0);
    if (err) {
        en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
        return err;
    }

    for (i = 0; i < priv->rx_ring_num; i++) {
        priv->rx_ring[i]->qpn = rss_map->base_qpn + i;
        err = mlx4_en_config_rss_qp(priv, priv->rx_ring[i]->qpn,
            priv->rx_ring[i],
            &rss_map->state[i],
            &rss_map->qps[i]);
        if (err)
            goto rss_err;

        ++good_qps;
    }

    /* Configure RSS indirection qp */
    err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
    if (err) {
        en_err(priv, "Failed to allocate RSS indirection QP\n");
        goto rss_err;
    }
    rss_map->indir_qp.event = mlx4_en_sqp_event;
    mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
        priv->rx_ring[0]->cqn, -1, &context);

    if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
        rss_rings = priv->rx_ring_num;
    else
        rss_rings = priv->prof->rss_rings;

    ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
        MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
    rss_context = ptr;
    rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
        (rss_map->base_qpn));
    rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
    if (priv->mdev->profile.udp_rss)
        rss_context->base_qpn_udp = rss_context->default_qpn;
    rss_context->flags = mlx4_en_get_rss_mask(priv);
    rss_context->hash_fn = MLX4_RSS_HASH_TOP;
    key = mlx4_en_get_rss_key(priv, NULL);
    for (i = 0; i < 10; i++)
        rss_context->rss_key[i] = key[i];

    err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
        &rss_map->indir_qp, &rss_map->indir_state);
    if (err)
        goto indir_err;

    return 0;

indir_err:
    mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
        MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
    mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
    mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
    for (i = 0; i < good_qps; i++) {
        mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
            MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
        mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
        mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
    }
    mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
    return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_rss_map *rss_map = &priv->rss_map;
    int i;

    mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
        MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
    mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
    mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

    for (i = 0; i < priv->rx_ring_num; i++) {
        mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
            MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
        mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
        mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
    }
    mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}