1 /* 2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "mlx4_stats.h"

/* High bit of a MAC table entry marks the entry as valid to the HW */
#define MLX4_MAC_VALID		(1ull << 63)

/* Bit 31 of a VLAN table entry marks the entry as valid to the HW */
#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL

#define MLX4_FLAG_V_IGNORE_FCS_MASK		0x2
#define MLX4_IGNORE_FCS_MASK			0x1

/*
 * Reset the per-port MAC table: clear all entries and refcounts and
 * size the table from the device capabilities.
 */
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

/*
 * Reset the per-port VLAN table.  The first MLX4_VLAN_REGULAR entries
 * are reserved (special) indices, hence the reduced ->max.
 */
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i] = 0;
	}
	table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

/* Zero out the per-port RoCE GID table */
void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
			      struct mlx4_roce_gid_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
		memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
}

/*
 * Check that @index refers to an in-range, currently-populated MAC
 * table entry.  Returns 0 on success, -EINVAL otherwise.
 */
static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid Mac entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

/*
 * Look up @mac (low 48 bits compared) among the referenced entries of
 * the MAC table.  Caller must hold table->mutex.  Returns the index or
 * -EINVAL when not found.
 */
static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (table->refs[i] &&
		    (MLX4_MAC_MASK & mac) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* Mac not found */
	return -EINVAL;
}

/* Push the whole MAC table to firmware via SET_PORT (native command) */
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
				   __be64 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);

	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;

	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/*
 * Find the table index of a cached MAC without registering it.
 * Returns 0 and stores the index in *idx, or -ENOENT when not cached.
 *
 * NOTE(review): this reads the table without taking table->mutex —
 * presumably callers serialize against register/unregister; confirm.
 */
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i])
			continue;

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);

/*
 * Register @mac on @port.  If the MAC is already in the table its
 * refcount is bumped; otherwise a free slot is populated and the table
 * is pushed to firmware.  Returns the table index (>= 0) on success or
 * a negative errno.
 */
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i]) {
			if (free < 0)
				free = i;
			continue;
		}

		if ((MLX4_MAC_MASK & mac) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		/* roll back the entry on firmware failure */
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;
	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);

/*
 * Multi-function-aware MAC registration.  Slaves go through the
 * ALLOC_RES command; falls back to the old REG_MAC wire format once if
 * the new format is rejected with -EINVAL, then remembers the result
 * in dev->flags.
 */
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	int err = -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			err = mlx4_cmd_imm(dev, mac, &out_param,
					   ((u32) port) << 8 | (u32) RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		if (err && err == -EINVAL && mlx4_is_slave(dev)) {
			/* retry using old REG_MAC format */
			set_param_l(&out_param, port);
			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (!err)
				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
		}
		if (err)
			return err;

		/* index is returned in the low dword of out_param */
		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);

/* First QP number of the ETH-address QP region belonging to @port */
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
		(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);

/*
 * Drop one reference on @mac; when the last reference goes away the
 * entry is cleared and the table is pushed to firmware.
 */
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info;
	struct mlx4_port_info *info_unused__;	/* no-op placeholder removed */
	struct mlx4_mac_table *table;
	int index;

	if (port < 1 || port > dev->caps.num_ports) {
		mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
		return;
	}
	info = &mlx4_priv(dev)->port[port];
	table = &info->mac_table;
	mutex_lock(&table->mutex);
	index = find_index(dev, table, mac);

	if (validate_index(dev, table, index))
		goto out;
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
			 index);
		goto out;
	}

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

/*
 * Multi-function-aware MAC unregistration; chooses new or old FREE_RES
 * wire format based on the flag latched by mlx4_register_mac().
 * Failures are deliberately ignored (best effort teardown).
 */
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			(void) mlx4_cmd_imm(dev, mac, &out_param,
					    ((u32) port) << 8 | (u32) RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		} else {
			/* use old unregister mac format */
			set_param_l(&out_param, port);
			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
	return;
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);

/*
 * Replace in place the MAC associated with @qpn (index derived from the
 * port's base QPN) and push the table to firmware.  On firmware failure
 * the entry is cleared.
 */
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);

/* Push the whole VLAN table to firmware via SET_PORT (native command) */
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

/*
 * Find the table index of a cached VLAN id.  Returns 0 and stores the
 * index in *idx, or -ENOENT when not cached.  Does not take a ref.
 */
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i;

	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
		if (table->refs[i] &&
		    (vid == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, increase reference count */
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);

/*
 * Register @vlan on @port, skipping the reserved special indices below
 * MLX4_VLAN_REGULAR.  Stores the chosen index in *index; bumps the
 * refcount if the VLAN is already present.
 */
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
			 int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase references count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		/* roll back on firmware failure */
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}

/*
 * Multi-function-aware VLAN registration; rejects out-of-range ids and
 * routes slave requests through the wrapped ALLOC_RES command.
 */
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param = 0;
	int err;

	if (vlan > 4095)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, vlan, &out_param,
				   ((u32) port) << 8 | (u32) RES_VLAN,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*index = get_param_l(&out_param);

		return err;
	}
	return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);

/*
 * Drop one reference on @vlan; refuses to touch the reserved special
 * indices.  Clears the entry and updates firmware on last reference.
 */
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;

	mutex_lock(&table->mutex);
	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}

/*
 * Multi-function-aware VLAN unregistration (best effort — command
 * failures are ignored on purpose).
 */
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		(void) mlx4_cmd_imm(dev, vlan, &out_param,
				    ((u32) port) << 8 | (u32) RES_VLAN,
				    RES_OP_RESERVE_AND_MAP,
				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);

/*
 * Query the IB port capability mask via a MAD_IFC command and store it
 * in *caps.  The inbox is a hand-built MAD; 0x0015 is presumably the
 * PortInfo attribute ID and offset 84 the capability-mask field —
 * NOTE(review): confirm against the IB management datagram layout.
 */
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}

/* All-zero GID used as the "empty entry" sentinel in comparisons */
static struct mlx4_roce_gid_entry zgid_entry;

/*
 * Number of RoCE GIDs owned by @slave on @port: the PF gets a fixed
 * quota, and the remaining GIDs are divided among the VFs active on the
 * port (earlier slaves absorb the remainder).
 */
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
	int vfs;
	int slave_gid = slave;
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;

	/* Slave is a VF */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave number into a per-port ordinal by
	 * subtracting slaves that are only active on other ports. */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
			dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}

/*
 * First index in the port GID table belonging to @slave, consistent
 * with the split computed by mlx4_get_slave_num_gids() above.
 */
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
	int gids;
	unsigned i;
	int slave_gid = slave;
	int vfs;

	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return 0;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Same per-port ordinal conversion as mlx4_get_slave_num_gids() */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
			dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);

	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
		((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); 610 611 static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, 612 int port, struct mlx4_cmd_mailbox *mailbox) 613 { 614 struct mlx4_roce_gid_entry *gid_entry_mbox; 615 struct mlx4_priv *priv = mlx4_priv(dev); 616 int num_gids, base, offset; 617 int i, err; 618 619 num_gids = mlx4_get_slave_num_gids(dev, slave, port); 620 base = mlx4_get_base_gid_ix(dev, slave, port); 621 622 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); 623 624 mutex_lock(&(priv->port[port].gid_table.mutex)); 625 /* Zero-out gids belonging to that slave in the port GID table */ 626 for (i = 0, offset = base; i < num_gids; offset++, i++) 627 memcpy(priv->port[port].gid_table.roce_gids[offset].raw, 628 zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE); 629 630 /* Now, copy roce port gids table to mailbox for passing to FW */ 631 gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf; 632 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) 633 memcpy(gid_entry_mbox->raw, 634 priv->port[port].gid_table.roce_gids[i].raw, 635 MLX4_ROCE_GID_ENTRY_SIZE); 636 637 err = mlx4_cmd(dev, mailbox->dma, 638 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 639 MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT, 640 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 641 mutex_unlock(&(priv->port[port].gid_table.mutex)); 642 return err; 643 } 644 645 646 void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) 647 { 648 struct mlx4_active_ports actv_ports; 649 struct mlx4_cmd_mailbox *mailbox; 650 int num_eth_ports, err; 651 int i; 652 653 if (slave < 0 || slave > dev->persist->num_vfs) 654 return; 655 656 actv_ports = mlx4_get_active_ports(dev, slave); 657 658 for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) { 659 if (test_bit(i, actv_ports.ports)) { 660 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) 661 continue; 662 num_eth_ports++; 663 } 664 } 665 666 if (!num_eth_ports) 667 return; 668 669 /* have ETH ports. 
Alloc mailbox for SET_PORT command */ 670 mailbox = mlx4_alloc_cmd_mailbox(dev); 671 if (IS_ERR(mailbox)) 672 return; 673 674 for (i = 0; i < dev->caps.num_ports; i++) { 675 if (test_bit(i, actv_ports.ports)) { 676 if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) 677 continue; 678 err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox); 679 if (err) 680 mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n", 681 slave, i + 1, err); 682 } 683 } 684 685 mlx4_free_cmd_mailbox(dev, mailbox); 686 return; 687 } 688 689 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 690 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 691 { 692 struct mlx4_priv *priv = mlx4_priv(dev); 693 struct mlx4_port_info *port_info; 694 struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; 695 struct mlx4_slave_state *slave_st = &master->slave_state[slave]; 696 struct mlx4_set_port_rqp_calc_context *qpn_context; 697 struct mlx4_set_port_general_context *gen_context; 698 struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; 699 int reset_qkey_viols; 700 int port; 701 int is_eth; 702 int num_gids; 703 int base; 704 u32 in_modifier; 705 u32 promisc; 706 u16 mtu, prev_mtu; 707 int err; 708 int i, j; 709 int offset; 710 __be32 agg_cap_mask; 711 __be32 slave_cap_mask; 712 __be32 new_cap_mask; 713 714 port = in_mod & 0xff; 715 in_modifier = in_mod >> 8; 716 is_eth = op_mod; 717 port_info = &priv->port[port]; 718 719 /* Slaves cannot perform SET_PORT operations except changing MTU */ 720 if (is_eth) { 721 if (slave != dev->caps.function && 722 in_modifier != MLX4_SET_PORT_GENERAL && 723 in_modifier != MLX4_SET_PORT_GID_TABLE) { 724 mlx4_warn(dev, "denying SET_PORT for slave:%d\n", 725 slave); 726 return -EINVAL; 727 } 728 switch (in_modifier) { 729 case MLX4_SET_PORT_RQP_CALC: 730 qpn_context = inbox->buf; 731 qpn_context->base_qpn = 732 cpu_to_be32(port_info->base_qpn); 733 qpn_context->n_mac = 0x7; 734 promisc = 
be32_to_cpu(qpn_context->promisc) >> 735 SET_PORT_PROMISC_SHIFT; 736 qpn_context->promisc = cpu_to_be32( 737 promisc << SET_PORT_PROMISC_SHIFT | 738 port_info->base_qpn); 739 promisc = be32_to_cpu(qpn_context->mcast) >> 740 SET_PORT_MC_PROMISC_SHIFT; 741 qpn_context->mcast = cpu_to_be32( 742 promisc << SET_PORT_MC_PROMISC_SHIFT | 743 port_info->base_qpn); 744 break; 745 case MLX4_SET_PORT_GENERAL: 746 gen_context = inbox->buf; 747 /* Mtu is configured as the max MTU among all the 748 * the functions on the port. */ 749 mtu = be16_to_cpu(gen_context->mtu); 750 mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + 751 ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); 752 prev_mtu = slave_st->mtu[port]; 753 slave_st->mtu[port] = mtu; 754 if (mtu > master->max_mtu[port]) 755 master->max_mtu[port] = mtu; 756 if (mtu < prev_mtu && prev_mtu == 757 master->max_mtu[port]) { 758 slave_st->mtu[port] = mtu; 759 master->max_mtu[port] = mtu; 760 for (i = 0; i < dev->num_slaves; i++) { 761 master->max_mtu[port] = 762 max(master->max_mtu[port], 763 master->slave_state[i].mtu[port]); 764 } 765 } 766 767 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 768 break; 769 case MLX4_SET_PORT_GID_TABLE: 770 /* change to MULTIPLE entries: number of guest's gids 771 * need a FOR-loop here over number of gids the guest has. 772 * 1. 
Check no duplicates in gids passed by slave 773 */ 774 num_gids = mlx4_get_slave_num_gids(dev, slave, port); 775 base = mlx4_get_base_gid_ix(dev, slave, port); 776 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 777 for (i = 0; i < num_gids; gid_entry_mbox++, i++) { 778 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, 779 sizeof(zgid_entry))) 780 continue; 781 gid_entry_mb1 = gid_entry_mbox + 1; 782 for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) { 783 if (!memcmp(gid_entry_mb1->raw, 784 zgid_entry.raw, sizeof(zgid_entry))) 785 continue; 786 if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw, 787 sizeof(gid_entry_mbox->raw))) { 788 /* found duplicate */ 789 return -EINVAL; 790 } 791 } 792 } 793 794 /* 2. Check that do not have duplicates in OTHER 795 * entries in the port GID table 796 */ 797 798 mutex_lock(&(priv->port[port].gid_table.mutex)); 799 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 800 if (i >= base && i < base + num_gids) 801 continue; /* don't compare to slave's current gids */ 802 gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i]; 803 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) 804 continue; 805 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 806 for (j = 0; j < num_gids; gid_entry_mbox++, j++) { 807 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, 808 sizeof(zgid_entry))) 809 continue; 810 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, 811 sizeof(gid_entry_tbl->raw))) { 812 /* found duplicate */ 813 mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n", 814 slave, i); 815 mutex_unlock(&(priv->port[port].gid_table.mutex)); 816 return -EINVAL; 817 } 818 } 819 } 820 821 /* insert slave GIDs with memcpy, starting at slave's base index */ 822 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 823 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) 824 memcpy(priv->port[port].gid_table.roce_gids[offset].raw, 825 
gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE); 826 827 /* Now, copy roce port gids table to current mailbox for passing to FW */ 828 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); 829 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) 830 memcpy(gid_entry_mbox->raw, 831 priv->port[port].gid_table.roce_gids[i].raw, 832 MLX4_ROCE_GID_ENTRY_SIZE); 833 834 err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, 835 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 836 MLX4_CMD_NATIVE); 837 mutex_unlock(&(priv->port[port].gid_table.mutex)); 838 return err; 839 } 840 841 return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, 842 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 843 MLX4_CMD_NATIVE); 844 } 845 846 /* Slaves are not allowed to SET_PORT beacon (LED) blink */ 847 if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) { 848 mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave); 849 return -EPERM; 850 } 851 852 /* For IB, we only consider: 853 * - The capability mask, which is set to the aggregate of all 854 * slave function capabilities 855 * - The QKey violatin counter - reset according to each request. 
856 */ 857 858 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 859 reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40; 860 new_cap_mask = ((__be32 *) inbox->buf)[2]; 861 } else { 862 reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1; 863 new_cap_mask = ((__be32 *) inbox->buf)[1]; 864 } 865 866 /* slave may not set the IS_SM capability for the port */ 867 if (slave != mlx4_master_func_num(dev) && 868 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM)) 869 return -EINVAL; 870 871 /* No DEV_MGMT in multifunc mode */ 872 if (mlx4_is_mfunc(dev) && 873 (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP)) 874 return -EINVAL; 875 876 agg_cap_mask = 0; 877 slave_cap_mask = 878 priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; 879 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask; 880 for (i = 0; i < dev->num_slaves; i++) 881 agg_cap_mask |= 882 priv->mfunc.master.slave_state[i].ib_cap_mask[port]; 883 884 /* only clear mailbox for guests. Master may be setting 885 * MTU or PKEY table size 886 */ 887 if (slave != dev->caps.function) 888 memset(inbox->buf, 0, 256); 889 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 890 *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; 891 ((__be32 *) inbox->buf)[2] = agg_cap_mask; 892 } else { 893 ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; 894 ((__be32 *) inbox->buf)[1] = agg_cap_mask; 895 } 896 897 err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT, 898 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 899 if (err) 900 priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = 901 slave_cap_mask; 902 return err; 903 } 904 905 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, 906 struct mlx4_vhcr *vhcr, 907 struct mlx4_cmd_mailbox *inbox, 908 struct mlx4_cmd_mailbox *outbox, 909 struct mlx4_cmd_info *cmd) 910 { 911 int port = mlx4_slave_convert_port( 912 dev, slave, vhcr->in_modifier & 0xFF); 913 914 if (port < 0) 915 return -EINVAL; 916 917 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | 918 (port & 0xFF); 919 
920 return mlx4_common_set_port(dev, slave, vhcr->in_modifier, 921 vhcr->op_modifier, inbox); 922 } 923 924 /* bit locations for set port command with zero op modifier */ 925 enum { 926 MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ 927 MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ 928 MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, 929 MLX4_CHANGE_PORT_VL_CAP = 21, 930 MLX4_CHANGE_PORT_MTU_CAP = 22, 931 }; 932 933 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) 934 { 935 struct mlx4_cmd_mailbox *mailbox; 936 int err, vl_cap, pkey_tbl_flag = 0; 937 938 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 939 return 0; 940 941 mailbox = mlx4_alloc_cmd_mailbox(dev); 942 if (IS_ERR(mailbox)) 943 return PTR_ERR(mailbox); 944 945 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; 946 947 if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { 948 pkey_tbl_flag = 1; 949 ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz); 950 } 951 952 /* IB VL CAP enum isn't used by the firmware, just numerical values */ 953 for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { 954 ((__be32 *) mailbox->buf)[0] = cpu_to_be32( 955 (1 << MLX4_CHANGE_PORT_MTU_CAP) | 956 (1 << MLX4_CHANGE_PORT_VL_CAP) | 957 (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | 958 (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | 959 (vl_cap << MLX4_SET_PORT_VL_CAP)); 960 err = mlx4_cmd(dev, mailbox->dma, port, 961 MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT, 962 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 963 if (err != -ENOMEM) 964 break; 965 } 966 967 mlx4_free_cmd_mailbox(dev, mailbox); 968 return err; 969 } 970 971 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, 972 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) 973 { 974 struct mlx4_cmd_mailbox *mailbox; 975 struct mlx4_set_port_general_context *context; 976 int err; 977 u32 in_mod; 978 979 mailbox = mlx4_alloc_cmd_mailbox(dev); 980 if (IS_ERR(mailbox)) 981 return PTR_ERR(mailbox); 982 context = mailbox->buf; 983 context->flags = 
SET_PORT_GEN_ALL_VALID; 984 context->mtu = cpu_to_be16(mtu); 985 context->pptx = (pptx * (!pfctx)) << 7; 986 context->pfctx = pfctx; 987 context->pprx = (pprx * (!pfcrx)) << 7; 988 context->pfcrx = pfcrx; 989 990 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 991 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, 992 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 993 MLX4_CMD_WRAPPED); 994 995 mlx4_free_cmd_mailbox(dev, mailbox); 996 return err; 997 } 998 EXPORT_SYMBOL(mlx4_SET_PORT_general); 999 1000 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, 1001 u8 promisc) 1002 { 1003 struct mlx4_cmd_mailbox *mailbox; 1004 struct mlx4_set_port_rqp_calc_context *context; 1005 int err; 1006 u32 in_mod; 1007 u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? 1008 MCAST_DIRECT : MCAST_DEFAULT; 1009 1010 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 1011 return 0; 1012 1013 mailbox = mlx4_alloc_cmd_mailbox(dev); 1014 if (IS_ERR(mailbox)) 1015 return PTR_ERR(mailbox); 1016 context = mailbox->buf; 1017 context->base_qpn = cpu_to_be32(base_qpn); 1018 context->n_mac = dev->caps.log_num_macs; 1019 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | 1020 base_qpn); 1021 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | 1022 base_qpn); 1023 context->intra_no_vlan = 0; 1024 context->no_vlan = MLX4_NO_VLAN_IDX; 1025 context->intra_vlan_miss = 0; 1026 context->vlan_miss = MLX4_VLAN_MISS_IDX; 1027 1028 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; 1029 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, 1030 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 1031 MLX4_CMD_WRAPPED); 1032 1033 mlx4_free_cmd_mailbox(dev, mailbox); 1034 return err; 1035 } 1036 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); 1037 1038 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) 1039 { 1040 struct mlx4_cmd_mailbox *mailbox; 1041 struct mlx4_set_port_general_context *context; 1042 u32 
in_mod; 1043 int err; 1044 1045 mailbox = mlx4_alloc_cmd_mailbox(dev); 1046 if (IS_ERR(mailbox)) 1047 return PTR_ERR(mailbox); 1048 context = mailbox->buf; 1049 context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK; 1050 if (ignore_fcs_value) 1051 context->ignore_fcs |= MLX4_IGNORE_FCS_MASK; 1052 else 1053 context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK; 1054 1055 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 1056 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 1057 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1058 1059 mlx4_free_cmd_mailbox(dev, mailbox); 1060 return err; 1061 } 1062 EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check); 1063 1064 enum { 1065 VXLAN_ENABLE_MODIFY = 1 << 7, 1066 VXLAN_STEERING_MODIFY = 1 << 6, 1067 1068 VXLAN_ENABLE = 1 << 7, 1069 }; 1070 1071 struct mlx4_set_port_vxlan_context { 1072 u32 reserved1; 1073 u8 modify_flags; 1074 u8 reserved2; 1075 u8 enable_flags; 1076 u8 steering; 1077 }; 1078 1079 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable) 1080 { 1081 int err; 1082 u32 in_mod; 1083 struct mlx4_cmd_mailbox *mailbox; 1084 struct mlx4_set_port_vxlan_context *context; 1085 1086 mailbox = mlx4_alloc_cmd_mailbox(dev); 1087 if (IS_ERR(mailbox)) 1088 return PTR_ERR(mailbox); 1089 context = mailbox->buf; 1090 memset(context, 0, sizeof(*context)); 1091 1092 context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY; 1093 if (enable) 1094 context->enable_flags = VXLAN_ENABLE; 1095 context->steering = steering; 1096 1097 in_mod = MLX4_SET_PORT_VXLAN << 8 | port; 1098 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, 1099 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 1100 MLX4_CMD_NATIVE); 1101 1102 mlx4_free_cmd_mailbox(dev, mailbox); 1103 return err; 1104 } 1105 EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN); 1106 1107 int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time) 1108 { 1109 int err; 1110 struct mlx4_cmd_mailbox *mailbox; 1111 1112 mailbox = mlx4_alloc_cmd_mailbox(dev); 1113 if 
(IS_ERR(mailbox)) 1114 return PTR_ERR(mailbox); 1115 1116 *((__be32 *)mailbox->buf) = cpu_to_be32(time); 1117 1118 err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE, 1119 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 1120 MLX4_CMD_NATIVE); 1121 1122 mlx4_free_cmd_mailbox(dev, mailbox); 1123 return err; 1124 } 1125 EXPORT_SYMBOL(mlx4_SET_PORT_BEACON); 1126 1127 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, 1128 struct mlx4_vhcr *vhcr, 1129 struct mlx4_cmd_mailbox *inbox, 1130 struct mlx4_cmd_mailbox *outbox, 1131 struct mlx4_cmd_info *cmd) 1132 { 1133 int err = 0; 1134 1135 return err; 1136 } 1137 1138 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, 1139 u64 mac, u64 clear, u8 mode) 1140 { 1141 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, 1142 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B, 1143 MLX4_CMD_WRAPPED); 1144 } 1145 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR); 1146 1147 int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, 1148 struct mlx4_vhcr *vhcr, 1149 struct mlx4_cmd_mailbox *inbox, 1150 struct mlx4_cmd_mailbox *outbox, 1151 struct mlx4_cmd_info *cmd) 1152 { 1153 int err = 0; 1154 1155 return err; 1156 } 1157 1158 int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, 1159 u32 in_mod, struct mlx4_cmd_mailbox *outbox) 1160 { 1161 return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, 1162 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, 1163 MLX4_CMD_NATIVE); 1164 } 1165 1166 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, 1167 struct mlx4_vhcr *vhcr, 1168 struct mlx4_cmd_mailbox *inbox, 1169 struct mlx4_cmd_mailbox *outbox, 1170 struct mlx4_cmd_info *cmd) 1171 { 1172 if (slave != dev->caps.function) 1173 return 0; 1174 return mlx4_common_dump_eth_stats(dev, slave, 1175 vhcr->in_modifier, outbox); 1176 } 1177 1178 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, 1179 int *slave_id) 1180 { 1181 struct mlx4_priv *priv = mlx4_priv(dev); 1182 int i, found_ix 
 = -1;
	/* GID entries beyond the PF-reserved block are shared among VFs */
	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	struct mlx4_slaves_pport slaves_pport;
	unsigned num_vfs;
	int slave_gid;

	if (!mlx4_is_mfunc(dev))
		return -EINVAL;

	/* Number of VFs on this physical port, excluding the PF.
	 * NOTE(review): with no active VFs this is 0 and the
	 * vf_gids/num_vfs divisions below would divide by zero; this
	 * appears to rely on found_ix only ever matching a PF GID in
	 * that case - confirm.
	 */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	num_vfs = bitmap_weight(slaves_pport.slaves,
				dev->persist->num_vfs + 1) - 1;

	/* Linear scan of the port's GID table for an exact byte match */
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
			    MLX4_ROCE_GID_ENTRY_SIZE)) {
			found_ix = i;
			break;
		}
	}

	if (found_ix >= 0) {
		/* Calculate a slave_gid which is the slave number in the gid
		 * table and not a globally unique slave number.
		 */
		if (found_ix < MLX4_ROCE_PF_GIDS)
			slave_gid = 0;
		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
			 (vf_gids / num_vfs + 1))
			/* First (vf_gids % num_vfs) VFs hold one extra GID */
			slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
				     (vf_gids / num_vfs + 1)) + 1;
		else
			slave_gid =
			((found_ix - MLX4_ROCE_PF_GIDS -
			  ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
			 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;

		/* Calculate the globally unique slave id */
		if (slave_gid) {
			struct mlx4_active_ports exclusive_ports;
			struct mlx4_active_ports actv_ports;
			struct mlx4_slaves_pport slaves_pport_actv;
			unsigned max_port_p_one;
			int num_vfs_before = 0;
			int candidate_slave_gid;

			/* Calculate how many VFs are on the previous port, if exists */
			for (i = 1; i < port; i++) {
				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
							dev, &exclusive_ports);
				num_vfs_before += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->persist->num_vfs + 1);
			}

			/* candidate_slave_gid isn't necessarily the correct slave, but
			 * it has the same number of ports and is assigned to the same
			 * ports as the real slave we're looking for. On dual port VF,
			 * slave_gid = [single port VFs on port <port>] +
			 * [offset of the current slave from the first dual port VF] +
			 * 1 (for the PF).
			 */
			candidate_slave_gid = slave_gid + num_vfs_before;

			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
			max_port_p_one = find_first_bit(
				actv_ports.ports, dev->caps.num_ports) +
				bitmap_weight(actv_ports.ports,
					      dev->caps.num_ports) + 1;

			/* Calculate the real slave number */
			for (i = 1; i < max_port_p_one; i++) {
				if (i == port)
					continue;
				bitmap_zero(exclusive_ports.ports,
					    dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
						dev, &exclusive_ports);
				slave_gid += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->persist->num_vfs + 1);
			}
		}
		*slave_id = slave_gid;
	}

	/* 0 when the GID was found (*slave_id valid), -EINVAL otherwise */
	return (found_ix >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);

/* Copy the RoCE GID stored at index @slave_id of @port's GID table into
 * @gid (MLX4_ROCE_GID_ENTRY_SIZE bytes).  Master only; returns 0 on
 * success or -EINVAL when not the master.
 */
int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
				 u8 *gid)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev))
		return -EINVAL;

	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
	       MLX4_ROCE_GID_ENTRY_SIZE);
	return 0;
}
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);

/* Cable Module Info */
#define MODULE_INFO_MAX_READ 48	/* max data bytes per MAD query */

/* Low/high I2C device addresses used to address the two 256-byte EEPROM
 * pages (see the high/low page selection in mlx4_get_module_info).
 */
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256

/* Module Info Data - wire layout of the cable-info MAD payload */
struct mlx4_cable_info {
	u8 i2c_addr;
	u8 page_num;
	__be16 dev_mem_address;
	__be16 reserved1;
	__be16 size;
	__be32 reserved2[2];
	u8 data[MODULE_INFO_MAX_READ];
};

/* Error codes carried in the upper status byte of a cable-info MAD */
enum cable_info_err {
	CABLE_INF_INV_PORT = 0x1,
	CABLE_INF_OP_NOSUP = 0x2,
	CABLE_INF_NOT_CONN = 0x3,
1313 CABLE_INF_NO_EEPRM = 0x4, 1314 CABLE_INF_PAGE_ERR = 0x5, 1315 CABLE_INF_INV_ADDR = 0x6, 1316 CABLE_INF_I2C_ADDR = 0x7, 1317 CABLE_INF_QSFP_VIO = 0x8, 1318 CABLE_INF_I2C_BUSY = 0x9, 1319 }; 1320 1321 #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF) 1322 1323 static inline const char *cable_info_mad_err_str(u16 mad_status) 1324 { 1325 u8 err = MAD_STATUS_2_CABLE_ERR(mad_status); 1326 1327 switch (err) { 1328 case CABLE_INF_INV_PORT: 1329 return "invalid port selected"; 1330 case CABLE_INF_OP_NOSUP: 1331 return "operation not supported for this port (the port is of type CX4 or internal)"; 1332 case CABLE_INF_NOT_CONN: 1333 return "cable is not connected"; 1334 case CABLE_INF_NO_EEPRM: 1335 return "the connected cable has no EPROM (passive copper cable)"; 1336 case CABLE_INF_PAGE_ERR: 1337 return "page number is greater than 15"; 1338 case CABLE_INF_INV_ADDR: 1339 return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)"; 1340 case CABLE_INF_I2C_ADDR: 1341 return "invalid I2C slave address"; 1342 case CABLE_INF_QSFP_VIO: 1343 return "at least one cable violates the QSFP specification and ignores the modsel signal"; 1344 case CABLE_INF_I2C_BUSY: 1345 return "I2C bus is constantly busy"; 1346 } 1347 return "Unknown Error"; 1348 } 1349 1350 /** 1351 * mlx4_get_module_info - Read cable module eeprom data 1352 * @dev: mlx4_dev. 1353 * @port: port number. 1354 * @offset: byte offset in eeprom to start reading data from. 1355 * @size: num of bytes to read. 1356 * @data: output buffer to put the requested data into. 1357 * 1358 * Reads cable module eeprom data, puts the outcome data into 1359 * data pointer paramer. 1360 * Returns num of read bytes on success or a negative error 1361 * code. 
 */
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
			 u16 offset, u16 size, u8 *data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_mad_ifc *inmad, *outmad;
	struct mlx4_cable_info *cable_info;
	u16 i2c_addr;
	int ret;

	/* A single MAD carries at most MODULE_INFO_MAX_READ data bytes */
	if (size > MODULE_INFO_MAX_READ)
		size = MODULE_INFO_MAX_READ;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
	outmad = (struct mlx4_mad_ifc *)(outbox->buf);

	/* Build a Get MAD for the vendor "Module Info" attribute */
	inmad->method = 0x1; /* Get */
	inmad->class_version = 0x1;
	inmad->mgmt_class = 0x1;
	inmad->base_version = 0x1;
	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */

	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross pages reads are not allowed
		 * read until offset 256 in low page
		 */
		size -= offset + size - I2C_PAGE_SIZE;

	/* Select the I2C address for the page containing @offset and make
	 * the offset page-relative.
	 */
	i2c_addr = I2C_ADDR_LOW;
	if (offset >= I2C_PAGE_SIZE) {
		/* Reset offset to high page */
		i2c_addr = I2C_ADDR_HIGH;
		offset -= I2C_PAGE_SIZE;
	}

	cable_info = (struct mlx4_cable_info *)inmad->data;
	cable_info->dev_mem_address = cpu_to_be16(offset);
	cable_info->page_num = 0;
	cable_info->i2c_addr = i2c_addr;
	cable_info->size = cpu_to_be16(size);

	/* NOTE(review): op_mod 3 for MAD_IFC is a magic value here -
	 * confirm its meaning against the MAD_IFC command definition.
	 */
	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	if (be16_to_cpu(outmad->status)) {
		/* Mad returned with bad status */
		ret = be16_to_cpu(outmad->status);
		mlx4_warn(dev,
			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
			  0xFF60, port, i2c_addr, offset, size,
			  ret, cable_info_mad_err_str(ret));

		if (i2c_addr == I2C_ADDR_HIGH &&
		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
			/* Some SFP cables do not support i2c slave
			 * address 0x51 (high page), abort silently.
			 * The caller then sees 0, i.e. zero bytes read.
			 */
			ret = 0;
		else
			/* NOTE(review): this returns the negated MAD status,
			 * which is not an errno value - callers apparently
			 * only test for ret < 0.  Confirm before changing.
			 */
			ret = -ret;
		goto out;
	}
	/* Success: copy the payload out and report how many bytes we read */
	cable_info = (struct mlx4_cable_info *)outmad->data;
	memcpy(data, cable_info->data, size);
	ret = size;
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);