/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/export.h>

#include "mlx4.h"

#define MGM_QPN_MASK	0x00FFFFFF
#define MGM_BLCK_LB_BIT	30

static const u8 zero_gid[16];	/* automatically initialized to 0 */

struct mlx4_mgm {
	__be32			next_gid_index;
	__be32			members_count;
	u32			reserved[2];
	u8			gid[16];
	__be32			qp[MLX4_MAX_QP_PER_MGM];
};

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return 1 << dev->oper_log_mgm_entry_size;
}

int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}

static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
					struct mlx4_cmd_mailbox *mailbox,
					u32 size,
					u64 *reg_id)
{
	u64 imm;
	int err = 0;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;
	*reg_id = imm;

	return err;
}

static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
	int err = 0;

	err = mlx4_cmd(dev, regid, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	return err;
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}
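
/* Compute the steering hash index for a GID via the MGID_HASH firmware
 * command; the 16-bit hash is returned in the command's immediate output.
 * op_mod selects the hash variant (nonzero for Ethernet steering when the
 * device supports VEP multicast steering, see find_entry()). */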
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	if (!err)
		*hash = imm;

	return err;
}

static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
	struct mlx4_promisc_qp *pqp;

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}

/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
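
/* Bookkeeping model for B0 steering: each hardware MGM entry that software
 * tracks is represented by an mlx4_steer_index, and every promiscuous QP
 * that is *also* explicitly attached to that entry is recorded on the
 * entry's duplicates list.  That way, detaching one of the two
 * registrations does not pull the QP out of the hardware entry while the
 * other registration still needs it. */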
/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
		return -EINVAL;
	}

	/* the given qpn is listed as a promisc qpn
	 * we need to add it as a duplicate to this entry
	 * for future reference */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (qpn == dqp->qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}

/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, port, steer, qpn))
		return false;

	/* The qp is promisc qp so it is a duplicate on this index
	 * Find the index entry, and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous,
	 * check for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
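
/* Make a QP promiscuous for the given steering type: append it to every
 * existing steering entry on the port (recording a duplicate where it is
 * already a member) and rewrite the default entry to contain all
 * promiscuous QPs. */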
static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, port, steer, qpn)) {
		err = 0; /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	/* the promisc qp needs to be added for each one of the steering
	 * entries, if it already exists, needs to be added as a duplicate
	 * for this entry */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
		if (err)
			goto out_mailbox;

		members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
		prot = be32_to_cpu(mgm->members_count) >> 30;
		found = false;
		for (i = 0; i < members_count; i++) {
			if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
				/* Entry already exists, add to duplicates */
				dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
				if (!dqp) {
					err = -ENOMEM;
					goto out_mailbox;
				}
				dqp->qpn = qpn;
				list_add_tail(&dqp->list, &entry->duplicates);
				found = true;
			}
		}
		if (!found) {
			/* Need to add the qpn to mgm */
			if (members_count == dev->caps.num_qp_per_mgm) {
				/* entry is full */
				err = -ENOMEM;
				goto out_mailbox;
			}
			mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
			mgm->members_count = cpu_to_be32(members_count | (prot << 30));
			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
	}

	/* add the new qpn to list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to default entry */
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}

static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int loc, i;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_list;
	}
	mgm = mailbox->buf;
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	/* remove the qp from all the steering entries */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		found = false;
		list_for_each_entry(dqp, &entry->duplicates, list) {
			if (dqp->qpn == qpn) {
				found = true;
				break;
			}
		}
		if (found) {
			/* a duplicate, no need to change the mgm,
			 * only update the duplicates list */
			list_del(&dqp->list);
			kfree(dqp);
		} else {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
			members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
			for (loc = -1, i = 0; i < members_count; ++i)
				if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
					loc = i;

			if (loc < 0) {
				/* the qp must be a member here; bail out
				 * rather than write to qp[-1] below */
				err = -EINVAL;
				goto out_mailbox;
			}

			mgm->members_count = cpu_to_be32(--members_count |
							 (MLX4_PROT_ETH << 30));
			mgm->qp[loc] = mgm->qp[i - 1];
			mgm->qp[i - 1] = 0;

			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}
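
/* Hash-chain layout note: next_gid_index stores the AMGM index of the next
 * entry in the chain in bits 31:6, hence the ">> 6" / "<< 6" conversions
 * in the code below. */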
/*
 * Caller must hold MCG table semaphore.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0)
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
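
/* Pack the software rule control block into the hardware format.  Layout
 * of the ctrl dword, as built below: bit 0 - LIFO queue mode, bit 2 -
 * exclusive, bit 3 - allow loopback, promisc mode at bit 8, priority at
 * bit 16. */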
static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
				  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
	static const u8 __promisc_mode[] = {
		[MLX4_FS_PROMISC_NONE]          = 0x0,
		[MLX4_FS_PROMISC_UPLINK]        = 0x1,
		[MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
		[MLX4_FS_PROMISC_ALL_MULTI]     = 0x3,
	};

	u32 dw = 0;

	dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
	dw |= ctrl->exclusive ? (1 << 2) : 0;
	dw |= ctrl->allow_loopback ? (1 << 3) : 0;
	dw |= __promisc_mode[ctrl->promisc_mode] << 8;
	dw |= ctrl->priority << 16;

	hw->ctrl = cpu_to_be32(dw);
	hw->vf_vep_port = cpu_to_be32(ctrl->port);
	hw->qpn = cpu_to_be32(ctrl->qpn);
}

const u16 __sw_id_hw[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH]  = 0xE001,
	[MLX4_NET_TRANS_RULE_ID_IB]   = 0xE005,
	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
	[MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
	[MLX4_NET_TRANS_RULE_ID_TCP]  = 0xE004,
	[MLX4_NET_TRANS_RULE_ID_UDP]  = 0xE006
};

static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
			    struct _rule_hw *rule_hw)
{
	static const size_t __rule_hw_sz[] = {
		[MLX4_NET_TRANS_RULE_ID_ETH] =
			sizeof(struct mlx4_net_trans_rule_hw_eth),
		[MLX4_NET_TRANS_RULE_ID_IB] =
			sizeof(struct mlx4_net_trans_rule_hw_ib),
		[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
		[MLX4_NET_TRANS_RULE_ID_IPV4] =
			sizeof(struct mlx4_net_trans_rule_hw_ipv4),
		[MLX4_NET_TRANS_RULE_ID_TCP] =
			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
		[MLX4_NET_TRANS_RULE_ID_UDP] =
			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
	};
	if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
		return -EINVAL;
	}
	memset(rule_hw, 0, __rule_hw_sz[spec->id]);
	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
	rule_hw->size = __rule_hw_sz[spec->id] >> 2;

	switch (spec->id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
		       ETH_ALEN);
		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
		       ETH_ALEN);
		if (spec->eth.ether_type_enable) {
			rule_hw->eth.ether_type_enable = 1;
			rule_hw->eth.ether_type = spec->eth.ether_type;
		}
		rule_hw->eth.vlan_id = spec->eth.vlan_id;
		rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_IB:
		rule_hw->ib.qpn = spec->ib.r_qpn;
		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
		break;

	case MLX4_NET_TRANS_RULE_ID_IPV6:
		return -EOPNOTSUPP;

	case MLX4_NET_TRANS_RULE_ID_IPV4:
		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
		break;

	default:
		return -EINVAL;
	}

	return __rule_hw_sz[spec->id];
}
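
/* Log a human-readable dump of a steering rule; called when registering
 * the rule with firmware fails. */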
static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
			  struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
	struct mlx4_spec_list *cur;
	char buf[BUF_SIZE];
	int len = 0;

	mlx4_err(dev, "%s", str);
	len += snprintf(buf + len, BUF_SIZE - len,
			"port = %d prio = 0x%x qp = 0x%x ",
			rule->port, rule->priority, rule->qpn);

	list_for_each_entry(cur, &rule->list, list) {
		switch (cur->id) {
		case MLX4_NET_TRANS_RULE_ID_ETH:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dmac = %pM ", &cur->eth.dst_mac);
			if (cur->eth.ether_type)
				len += snprintf(buf + len, BUF_SIZE - len,
						"ethertype = 0x%x ",
						be16_to_cpu(cur->eth.ether_type));
			if (cur->eth.vlan_id)
				len += snprintf(buf + len, BUF_SIZE - len,
						"vlan-id = %d ",
						be16_to_cpu(cur->eth.vlan_id));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV4:
			if (cur->ipv4.src_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-ip = %pI4 ",
						&cur->ipv4.src_ip);
			if (cur->ipv4.dst_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-ip = %pI4 ",
						&cur->ipv4.dst_ip);
			break;

		case MLX4_NET_TRANS_RULE_ID_TCP:
		case MLX4_NET_TRANS_RULE_ID_UDP:
			if (cur->tcp_udp.src_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-port = %d ",
						be16_to_cpu(cur->tcp_udp.src_port));
			if (cur->tcp_udp.dst_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-port = %d ",
						be16_to_cpu(cur->tcp_udp.dst_port));
			break;

		case MLX4_NET_TRANS_RULE_ID_IB:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid = %pI6\n", cur->ib.dst_gid);
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid-mask = %pI6\n",
					cur->ib.dst_gid_msk);
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV6:
			break;

		default:
			break;
		}
	}
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	mlx4_err(dev, "%s", buf);

	if (len >= BUF_SIZE)
		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
}

int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_spec_list *cur;
	u32 size = 0;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
	trans_rule_ctrl_to_hw(rule, mailbox->buf);

	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

	list_for_each_entry(cur, &rule->list, list) {
		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}

	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
	if (ret == -ENOMEM)
		mlx4_err_rule(dev,
			      "mcg table is full. Failed to register network rule.\n",
			      rule);
	else if (ret)
		mlx4_err_rule(dev, "Failed to register network rule.\n", rule);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);
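
/* Typical usage (cf. mlx4_multicast_attach() below): build an
 * mlx4_net_trans_rule with an INIT_LIST_HEAD()-ed spec list, append one or
 * more mlx4_spec_list entries, pass the rule to mlx4_flow_attach() and
 * keep the returned reg_id for the matching mlx4_flow_detach() call. */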
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
	if (err)
		mlx4_err(dev, "Failed to detach network rule, registration id = 0x%llx\n",
			 reg_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);

int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d\n",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
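
/* Detach a QP from the MGM entry for the given GID; when the last member
 * is removed, the entry is unlinked from the hash chain and its AMGM slot
 * (if any) is released back to the bitmap. */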
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this qp is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1 << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
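
/* Attach a QP to a multicast group, dispatching on the device's steering
 * mode: A0 ignores Ethernet attaches (and falls through for IB), B0 tags
 * the GID with the steering type, and device-managed steering translates
 * the request into a flow-steering rule. */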
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.allow_loopback = !block_mcast_loopback;
		rule.port = port;
		rule.qpn = qp->qpn;
		INIT_LIST_HEAD(&rule.list);

		switch (prot) {
		case MLX4_PROT_ETH:
			spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
			memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
			memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
			break;

		case MLX4_PROT_IB_IPV6:
			spec.id = MLX4_NET_TRANS_RULE_ID_IB;
			memcpy(spec.ib.dst_gid, gid, 16);
			memset(&spec.ib.dst_gid_msk, 0xff, 16);
			break;
		default:
			return -EINVAL;
		}
		list_add_tail(&spec.list, &rule.list);

		return mlx4_flow_attach(dev, &rule, reg_id);
	}

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);

int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
	/* Use a designated initializer so the fields not assigned below are
	 * zeroed rather than passed to trans_rule_ctrl_to_hw() as stack
	 * garbage */
	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
	};
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_PROMISC_UPLINK:
	case MLX4_FS_PROMISC_FUNCTION_PORT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_PROMISC_ALL_MULTI:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_info(dev, "going promisc on %x\n", port);

	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
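
/* A single registration id per port is cached in
 * dev->regid_promisc_array / dev->regid_allmulti_array: the add above
 * refuses to double-register, and the remove below clears the cached id
 * once detach succeeds. */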
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_PROMISC_UPLINK:
	case MLX4_FS_PROMISC_FUNCTION_PORT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_PROMISC_ALL_MULTI:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);

int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	u8 port = vhcr->in_param >> 62;
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
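
/* The MCG bitmap hands out AMGM (hash-chain overflow) entries; allocated
 * indices are offset by dev->caps.num_mgms when used, see
 * mlx4_qp_attach_common(). */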
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when FW manages the MCG table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;
	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}