/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
	struct work_struct work;
	union {
		struct mlx5_ib_dev *dev;
		struct mlx5_ib_multiport_info *mpi;
	};
	bool is_slave;
	unsigned int event;
	void *param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel modules memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;

struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u8 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
					   struct net_device *ndev,
					   u8 *port_num)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	struct net_device *rep_ndev;
	struct mlx5_ib_port *port;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		port = &dev->port[i];
		if (!port->rep)
			continue;

		read_lock(&port->roce.netdev_lock);
		rep_ndev = mlx5_ib_get_rep_netdev(esw,
						  port->rep->vport);
		if (rep_ndev == ndev) {
			read_unlock(&port->roce.netdev_lock);
			*port_num = i + 1;
			return &port->roce;
		}
		read_unlock(&port->roce.netdev_lock);
	}

	return NULL;
}

static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u8 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		/* Should already be registered during the load */
		if (ibdev->is_rep)
			break;
		write_lock(&roce->netdev_lock);
		if (ndev->dev.parent == mdev->device)
			roce->netdev = ndev;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_UNREGISTER:
		/* In case of reps, ib device goes away before the netdevs */
		write_lock(&roce->netdev_lock);
		if (roce->netdev == ndev)
			roce->netdev = NULL;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if (ibdev->is_rep)
			roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
		if (!roce)
			return NOTIFY_DONE;
		if ((upper == ndev || (!upper && ndev == roce->netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				goto done;

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

	ndev = mlx5_lag_get_roce_netdev(mdev);
	if (ndev)
		goto out;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
	ndev = ibdev->port[port_num - 1].roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}

struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u8 ib_port_num,
						   u8 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		if (native_port_num)
			*native_port_num = ib_port_num;
		return ibdev->mdev;
	}

	if (native_port_num)
		*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	if (!port)
		return NULL;

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master no need to refcount, it'll exist
		 * as long as the ib_dev exists.
		 */
		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	if (mpi->is_master)
		goto out;

	mpi->mdev_refcnt--;
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
out:
	spin_unlock(&port->mp.mpi_lock);
}

static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					   u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
					u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_DDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_HDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width, bool ext)
{
	return ext ?
		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
					     active_width) :
		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
						active_width);
}

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	u8 mdev_port_num;
	bool ext;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
		port_num = 1;
	}

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 * Use native port in case of reps
	 */
	if (dev->is_rep)
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   1);
	else
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   mdev_port_num);
	if (err)
		goto out;
	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width, ext);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->ip_gids = true;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	/* If this is a stub query for an unaffiliated port stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (dev->lag_active) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
			 unsigned int index, const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	u16 vlan_id = 0xffff;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	u8 mac[ETH_ALEN];
	int ret;

	if (gid) {
		gid_type = attr->gid_type;
		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
		if (ret)
			return ret;
	}

	switch (gid_type) {
	case IB_GID_TYPE_IB:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac,
				      vlan_id < VLAN_CFI_MASK, vlan_id,
				      port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

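/* Descriptive note (not in the original source): the MAD interface is only
 * used for IB-type ports that lack IB virtualization support; all other
 * configurations are queried through vport/NIC commands below.
 */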
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    u8 atomic_size_qp,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8 bytes standard atomic operations and capable
	 * of host endianness respond
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	get_atomic_caps(dev, atomic_size_qp, props);
}

static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);

	get_atomic_caps(dev, atomic_size_qp, props);
}

bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr props = {};

	get_atomic_caps_dc(dev, &props);
	return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	bool raw_support = !mlx5_core_mp_enabled(mdev);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;
	else
		resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
				MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
				MLX5_RX_HASH_SRC_IPV4 |
				MLX5_RX_HASH_DST_IPV4 |
				MLX5_RX_HASH_SRC_IPV6 |
				MLX5_RX_HASH_DST_IPV6 |
				MLX5_RX_HASH_SRC_PORT_TCP |
				MLX5_RX_HASH_DST_PORT_TCP |
				MLX5_RX_HASH_SRC_PORT_UDP |
				MLX5_RX_HASH_DST_PORT_UDP |
				MLX5_RX_HASH_INNER;
			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
			    MLX5_ACCEL_IPSEC_CAP_DEVICE)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX5_RX_HASH_IPSEC_SPI;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
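		/* Descriptive note (not in the original source): basic IPoIB
		 * offloads imply checksum and TSO support on UD QPs.
		 */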
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
	    raw_support)
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
	    raw_support) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		props->max_dm_size =
			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_send_sge = max_sq_sg;
	props->max_recv_sge = max_rq_sg;
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	props->max_pi_fast_reg_page_list_len =
		props->max_fast_reg_page_list_len / 2;
	get_atomic_caps_qp(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		if (MLX5_CAP_GEN(mdev, pg))
			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
		props->odp_caps = dev->odp_caps;
	}

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET && raw_support) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.flags = IB_TM_CAP_RC;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
			MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
			MLX5_MAX_CQ_PERIOD;
	}

	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
		resp.response_length += sizeof(resp.cqe_comp_caps);

		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
			resp.cqe_comp_caps.max_num =
				MLX5_CAP_GEN(dev->mdev,
					     cqe_compression_max_num);

			resp.cqe_comp_caps.supported_format =
				MLX5_IB_CQE_RES_FORMAT_HASH |
				MLX5_IB_CQE_RES_FORMAT_CSUM;

			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
				resp.cqe_comp_caps.supported_format |=
					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
		}
	}

	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
	    raw_support) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
				resp.packet_pacing_caps.cap_flags |=
					MLX5_IB_PP_SUPPORT_BURST;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
			uhw->outlen)) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (field_avail(typeof(resp), flags, uhw->outlen)) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
		if (MLX5_CAP_GEN(mdev, qp_packet_based))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;

		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
	}

	if (field_avail(typeof(resp), sw_parsing_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
	    raw_support) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (field_avail(typeof(resp), tunnel_offloads_caps,
			uhw->outlen)) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_GRE)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
		if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
		    MLX5_FLEX_PROTO_CW_MPLS_UDP)
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X = 1 << 0,
	MLX5_IB_WIDTH_2X = 1 << 1,
	MLX5_IB_WIDTH_4X = 1 << 2,
	MLX5_IB_WIDTH_8X = 1 << 3,
	MLX5_IB_WIDTH_12X = 1 << 4
};

static void translate_active_width(struct ib_device *ibdev, u8 active_width,
				   u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	if (active_width & MLX5_IB_WIDTH_1X)
		*ib_width = IB_WIDTH_1X;
	else if (active_width & MLX5_IB_WIDTH_2X)
		*ib_width = IB_WIDTH_2X;
	else if (active_width & MLX5_IB_WIDTH_4X)
		*ib_width = IB_WIDTH_4X;
	else if (active_width & MLX5_IB_WIDTH_8X)
		*ib_width = IB_WIDTH_8X;
	else if (active_width & MLX5_IB_WIDTH_12X)
		*ib_width = IB_WIDTH_12X;
	else {
		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
			    (int)active_width);
		*ib_width = IB_WIDTH_4X;
	}

	return;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0 = 1,
	__IB_MAX_VL_0_1 = 2,
	__IB_MAX_VL_0_3 = 3,
	__IB_MAX_VL_0_7 = 4,
	__IB_MAX_VL_0_14 = 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0 = 1,
	MLX5_VL_HW_0_1 = 2,
	MLX5_VL_HW_0_2 = 3,
	MLX5_VL_HW_0_3 = 4,
	MLX5_VL_HW_0_4 = 5,
	MLX5_VL_HW_0_5 = 6,
	MLX5_VL_HW_0_6 = 7,
	MLX5_VL_HW_0_7 = 8,
	MLX5_VL_HW_0_14 = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	u16 max_mtu;
	u16 oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid = rep->lid;
	props->lmc = rep->lmc;
	props->sm_lid = rep->sm_lid;
	props->sm_sl = rep->sm_sl;
	props->state = rep->vport_state;
	props->phys_state = rep->port_physical_state;
	props->port_cap_flags = rep->cap_mask1;
	props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr = rep->pkey_violation_counter;
	props->qkey_viol_cntr = rep->qkey_violation_counter;
	props->subnet_timeout = rep->subnet_timeout;
	props->init_type_reply = rep->init_type_reply;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
		props->port_cap_flags2 = rep->cap_mask2;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	unsigned int count;
	int ret;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		ret = mlx5_query_hca_port(ibdev, port, props);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		ret = mlx5_query_port_roce(ibdev, port, props);
		break;

	default:
		ret = -EINVAL;
	}

	if (!ret && props) {
		struct mlx5_ib_dev *dev = to_mdev(ibdev);
		struct mlx5_core_dev *mdev;
		bool put_mdev = true;

		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
		if (!mdev) {
			/* If the port isn't affiliated yet query the master.
			 * The master and slave will have the same values.
			 */
			mdev = dev->mdev;
			port = 1;
			put_mdev = false;
		}
		count = mlx5_core_reserved_gids_count(mdev);
		if (put_mdev)
			mlx5_ib_put_native_port_mdev(dev, port);
		props->gid_tbl_len -= count;
	}
	return ret;
}

static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
				  struct ib_port_attr *props)
{
	int ret;

	/* Only link layer == ethernet is valid for representors
	 * and we always use port 1
	 */
	ret = mlx5_query_port_roce(ibdev, port, props);
	if (ret || !props)
		return ret;

	/* We don't support GIDS */
	props->gid_tbl_len = 0;

	return ret;
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}

}

static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
				   u16 index, u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev;
	bool put_mdev = true;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
	if (!mdev) {
		/* The port isn't affiliated yet, get the PKey from the master
		 * port. For RoCE the PKey tables will be the same.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
					index, pkey);
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port);

	return err;
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap. If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
				u32 value)
{
	struct mlx5_hca_vport_context ctx = {};
	struct mlx5_core_dev *mdev;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev)
		return -ENODEV;

	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
	if (err)
		goto out;

	if (~ctx.cap_mask1_perm & mask) {
		mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
			     mask, ctx.cap_mask1_perm);
		err = -EINVAL;
		goto out;
	}

	ctx.cap_mask1 = value;
	ctx.cap_mask1_perm = mask;
	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
						 0, &ctx);

out:
	mlx5_ib_put_native_port_mdev(dev, port_num);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

	/* CM layer calls ib_modify_port() regardless of the link layer. For
	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
	 */
	if (!is_ib)
		return 0;

	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
		return set_port_caps_atomic(dev, port, change_mask, value);
	}

	mutex_lock(&dev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
{
	mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}

static u16 calc_dynamic_bfregs(int uars_per_sys_page)
{
	/* Large page with non 4k uar support might limit the dynamic size */
	if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
		return MLX5_MIN_DYN_BFREGS;

	return MLX5_MAX_DYN_BFREGS;
}

static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
			     struct mlx5_bfreg_info *bfregi)
{
	int uars_per_sys_page;
	int bfregs_per_sys_page;
	int ref_bfregs = req->total_num_bfregs;

	if (req->total_num_bfregs == 0)
		return -EINVAL;

	BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
	BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);

	if (req->total_num_bfregs > MLX5_MAX_BFREGS)
		return -ENOMEM;

	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
	/* This holds the required static allocation asked by the user */
	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
		return -EINVAL;

	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;

	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, bfregi->total_num_bfregs,
		    bfregi->num_sys_pages);

	return 0;
}

static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int err;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
		if (err)
			goto error;

		mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
	}

	for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
		bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;

	return 0;

error:
	for (--i; i >= 0; i--)
		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);

	return err;
}

static void deallocate_uars(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi;
	int i;

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_sys_pages; i++)
		if (i < bfregi->num_static_sys_pages ||
		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}

int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	int err = 0;

	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td++;
	if (qp)
		dev->lb.qps++;

	if (dev->lb.user_td == 2 ||
	    dev->lb.qps == 1) {
		if (!dev->lb.enabled) {
			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
			dev->lb.enabled = true;
		}
	}

	mutex_unlock(&dev->lb.mutex);

	return err;
}

void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td--;
	if (qp)
		dev->lb.qps--;

	if (dev->lb.user_td == 1 &&
	    dev->lb.qps == 0) {
		if (dev->lb.enabled) {
			mlx5_nic_vport_update_local_lb(dev->mdev, false);
			dev->lb.enabled = false;
		}
	}

	mutex_unlock(&dev->lb.mutex);
}

static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
					  u16 uid)
{
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return 0;

	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return err;

	return mlx5_ib_enable_lb(dev, true, false);
}

static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
					     u16 uid)
{
	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		return;

	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);

	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		return;

	mlx5_ib_disable_lb(dev, true, false);
}

static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	u32 dump_fill_mkey;
	bool lib_uar_4k;

	if (!dev->ib_active)
		return -EAGAIN;

	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (udata->inlen >= min_req_v2)
		ver = 2;
	else
		return -EINVAL;

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
		return -EOPNOTSUPP;

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
				     MLX5_NON_FP_BFREGS_PER_UAR);
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return -EINVAL;

	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
		if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
			resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
	}

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	bfregi = &context->bfregi;

	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
	if (err)
		goto out_ctx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
				    sizeof(*bfregi->sys_pages),
				    GFP_KERNEL);
	if (!bfregi->sys_pages) {
		err = -ENOMEM;
		goto out_count;
	}

	err = allocate_uars(dev, context);
	if (err)
		goto out_sys_pages;

	if (ibdev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)
		context->ibucontext.invalidate_range =
			&mlx5_ib_invalidate_range;

	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
		err = mlx5_ib_devx_create(dev, true);
		if (err < 0)
			goto out_uars;
		context->devx_uid = err;
	}

	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
					     context->devx_uid);
	if (err)
		goto out_devx;

	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
		err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
		if (err)
			goto out_mdev;
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_bfregs = req.total_num_bfregs;
	resp.num_ports = dev->num_ports;

	if (field_avail(typeof(resp), cqe_version, udata->outlen))
		resp.response_length += sizeof(resp.cqe_version);

	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
		resp.response_length += sizeof(resp.cmds_supp_uhw);
	}

	if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
		if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
			resp.eth_min_inline++;
		}
		resp.response_length += sizeof(resp.eth_min_inline);
	}

	if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
		if (mdev->clock_info)
			resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
		resp.response_length += sizeof(resp.clock_info_versions);
	}

	/*
	 * We don't want to expose information from the PCI bar that is located
	 * after 4096 bytes, so if the arch only supports larger pages, let's
	 * pretend we don't support reading the HCA's core clock. This is also
	 * forced by mmap function.
	 */
	if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
		if (PAGE_SIZE <= 4096) {
			resp.comp_mask |=
				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset =
				offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
		}
		resp.response_length += sizeof(resp.hca_core_clock_offset);
	}

	if (field_avail(typeof(resp), log_uar_size, udata->outlen))
		resp.response_length += sizeof(resp.log_uar_size);

	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
		resp.response_length += sizeof(resp.num_uars_per_page);

	if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
		resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
		resp.response_length += sizeof(resp.num_dyn_bfregs);
	}

	if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
		if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
			resp.dump_fill_mkey = dump_fill_mkey;
			resp.comp_mask |=
				MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
		}
		resp.response_length += sizeof(resp.dump_fill_mkey);
	}

	err = ib_copy_to_udata(udata, &resp, resp.response_length);
	if (err)
		goto out_mdev;

	bfregi->ver = ver;
	bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
	context->cqe_version = resp.cqe_version;
	context->lib_caps = req.lib_caps;
	print_lib_caps(dev, context->lib_caps);

	if (dev->lag_active) {
		u8 port = mlx5_core_native_port_num(dev->mdev) - 1;

		atomic_set(&context->tx_port_affinity,
			   atomic_add_return(
				   1, &dev->port[port].roce.tx_port_affinity));
	}

	return 0;

out_mdev:
	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
out_devx:
	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
		mlx5_ib_devx_destroy(dev, context->devx_uid);

out_uars:
	deallocate_uars(dev, context);

out_sys_pages:
	kfree(bfregi->sys_pages);

out_count:
	kfree(bfregi->count);

out_ctx:
	return err;
}

static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_bfreg_info *bfregi;

	/* All umem's must be destroyed before destroying the ucontext. */
	mutex_lock(&ibcontext->per_mm_list_lock);
	WARN_ON(!list_empty(&ibcontext->per_mm_list));
	mutex_unlock(&ibcontext->per_mm_list_lock);

	bfregi = &context->bfregi;
	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);

	if (context->devx_uid)
		mlx5_ib_devx_destroy(dev, context->devx_uid);

	deallocate_uars(dev, context);
	kfree(bfregi->sys_pages);
	kfree(bfregi->count);
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
				 int uar_idx)
{
	int fw_uars_per_page;

	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;

	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}

/* Index resides in an extra byte to enable larger values than 255 */
static int get_extended_index(unsigned long offset)
{
	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}


static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
		return "WC";
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return "best effort WC";
	case MLX5_IB_MMAP_NC_PAGE:
		return "NC";
	case MLX5_IB_MMAP_DEVICE_MEM:
		return "Device Memory";
	default:
		return NULL;
	}
}

static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
					struct vm_area_struct *vma,
					struct mlx5_ib_ucontext *context)
{
	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
	    !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
		return -EOPNOTSUPP;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;
	vma->vm_flags &= ~VM_MAYWRITE;

	if (!dev->mdev->clock_info)
		return -EOPNOTSUPP;

	return vm_insert_page(vma, vma->vm_start,
			      virt_to_page(dev->mdev->clock_info));
}

static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		    struct vm_area_struct *vma,
		    struct mlx5_ib_ucontext *context)
{
	struct mlx5_bfreg_info *bfregi = &context->bfregi;
	int err;
	unsigned long idx;
	phys_addr_t pfn;
	pgprot_t prot;
	u32 bfreg_dyn_idx = 0;
	u32 uar_index;
	int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
	int max_valid_idx = dyn_uar ?
bfregi->num_sys_pages : 2101 bfregi->num_static_sys_pages; 2102 2103 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2104 return -EINVAL; 2105 2106 if (dyn_uar) 2107 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages; 2108 else 2109 idx = get_index(vma->vm_pgoff); 2110 2111 if (idx >= max_valid_idx) { 2112 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n", 2113 idx, max_valid_idx); 2114 return -EINVAL; 2115 } 2116 2117 switch (cmd) { 2118 case MLX5_IB_MMAP_WC_PAGE: 2119 case MLX5_IB_MMAP_ALLOC_WC: 2120 /* Some architectures don't support WC memory */ 2121 #if defined(CONFIG_X86) 2122 if (!pat_enabled()) 2123 return -EPERM; 2124 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU))) 2125 return -EPERM; 2126 #endif 2127 /* fall through */ 2128 case MLX5_IB_MMAP_REGULAR_PAGE: 2129 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ 2130 prot = pgprot_writecombine(vma->vm_page_prot); 2131 break; 2132 case MLX5_IB_MMAP_NC_PAGE: 2133 prot = pgprot_noncached(vma->vm_page_prot); 2134 break; 2135 default: 2136 return -EINVAL; 2137 } 2138 2139 if (dyn_uar) { 2140 int uars_per_page; 2141 2142 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k); 2143 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR); 2144 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) { 2145 mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n", 2146 bfreg_dyn_idx, bfregi->total_num_bfregs); 2147 return -EINVAL; 2148 } 2149 2150 mutex_lock(&bfregi->lock); 2151 /* Fail if uar already allocated, first bfreg index of each 2152 * page holds its count. 2153 */ 2154 if (bfregi->count[bfreg_dyn_idx]) { 2155 mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx); 2156 mutex_unlock(&bfregi->lock); 2157 return -EINVAL; 2158 } 2159 2160 bfregi->count[bfreg_dyn_idx]++; 2161 mutex_unlock(&bfregi->lock); 2162 2163 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); 2164 if (err) { 2165 mlx5_ib_warn(dev, "UAR alloc failed\n"); 2166 goto free_bfreg; 2167 } 2168 } else { 2169 uar_index = bfregi->sys_pages[idx]; 2170 } 2171 2172 pfn = uar_index2pfn(dev, uar_index); 2173 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); 2174 2175 err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE, 2176 prot); 2177 if (err) { 2178 mlx5_ib_err(dev, 2179 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n", 2180 err, mmap_cmd2str(cmd)); 2181 goto err; 2182 } 2183 2184 if (dyn_uar) 2185 bfregi->sys_pages[idx] = uar_index; 2186 return 0; 2187 2188 err: 2189 if (!dyn_uar) 2190 return err; 2191 2192 mlx5_cmd_free_uar(dev->mdev, idx); 2193 2194 free_bfreg: 2195 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); 2196 2197 return err; 2198 } 2199 2200 static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 2201 { 2202 struct mlx5_ib_ucontext *mctx = to_mucontext(context); 2203 struct mlx5_ib_dev *dev = to_mdev(context->device); 2204 u16 page_idx = get_extended_index(vma->vm_pgoff); 2205 size_t map_size = vma->vm_end - vma->vm_start; 2206 u32 npages = map_size >> PAGE_SHIFT; 2207 phys_addr_t pfn; 2208 2209 if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) != 2210 page_idx + npages) 2211 return -EINVAL; 2212 2213 pfn = ((dev->mdev->bar_addr + 2214 MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >> 2215 PAGE_SHIFT) + 2216 page_idx; 2217 return rdma_user_mmap_io(context, vma, pfn, map_size, 2218 pgprot_writecombine(vma->vm_page_prot)); 2219 } 2220 2221 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, 
struct vm_area_struct *vma) 2222 { 2223 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 2224 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 2225 unsigned long command; 2226 phys_addr_t pfn; 2227 2228 command = get_command(vma->vm_pgoff); 2229 switch (command) { 2230 case MLX5_IB_MMAP_WC_PAGE: 2231 case MLX5_IB_MMAP_NC_PAGE: 2232 case MLX5_IB_MMAP_REGULAR_PAGE: 2233 case MLX5_IB_MMAP_ALLOC_WC: 2234 return uar_mmap(dev, command, vma, context); 2235 2236 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: 2237 return -ENOSYS; 2238 2239 case MLX5_IB_MMAP_CORE_CLOCK: 2240 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 2241 return -EINVAL; 2242 2243 if (vma->vm_flags & VM_WRITE) 2244 return -EPERM; 2245 vma->vm_flags &= ~VM_MAYWRITE; 2246 2247 /* Don't expose to user-space information it shouldn't have */ 2248 if (PAGE_SIZE > 4096) 2249 return -EOPNOTSUPP; 2250 2251 pfn = (dev->mdev->iseg_base + 2252 offsetof(struct mlx5_init_seg, internal_timer_h)) >> 2253 PAGE_SHIFT; 2254 return rdma_user_mmap_io(&context->ibucontext, vma, pfn, 2255 PAGE_SIZE, 2256 pgprot_noncached(vma->vm_page_prot)); 2257 case MLX5_IB_MMAP_CLOCK_INFO: 2258 return mlx5_ib_mmap_clock_info_page(dev, vma, context); 2259 2260 case MLX5_IB_MMAP_DEVICE_MEM: 2261 return dm_mmap(ibcontext, vma); 2262 2263 default: 2264 return -EINVAL; 2265 } 2266 2267 return 0; 2268 } 2269 2270 static inline int check_dm_type_support(struct mlx5_ib_dev *dev, 2271 u32 type) 2272 { 2273 switch (type) { 2274 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 2275 if (!MLX5_CAP_DEV_MEM(dev->mdev, memic)) 2276 return -EOPNOTSUPP; 2277 break; 2278 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2279 if (!capable(CAP_SYS_RAWIO) || 2280 !capable(CAP_NET_RAW)) 2281 return -EPERM; 2282 2283 if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) || 2284 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner))) 2285 return -EOPNOTSUPP; 2286 break; 2287 } 2288 2289 return 0; 2290 } 2291 2292 static int handle_alloc_dm_memic(struct ib_ucontext *ctx, 2293 struct mlx5_ib_dm *dm, 2294 struct ib_dm_alloc_attr *attr, 2295 struct uverbs_attr_bundle *attrs) 2296 { 2297 struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm; 2298 u64 start_offset; 2299 u32 page_idx; 2300 int err; 2301 2302 dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE); 2303 2304 err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr, 2305 dm->size, attr->alignment); 2306 if (err) 2307 return err; 2308 2309 page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) - 2310 MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >> 2311 PAGE_SHIFT; 2312 2313 err = uverbs_copy_to(attrs, 2314 MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, 2315 &page_idx, sizeof(page_idx)); 2316 if (err) 2317 goto err_dealloc; 2318 2319 start_offset = dm->dev_addr & ~PAGE_MASK; 2320 err = uverbs_copy_to(attrs, 2321 MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, 2322 &start_offset, sizeof(start_offset)); 2323 if (err) 2324 goto err_dealloc; 2325 2326 bitmap_set(to_mucontext(ctx)->dm_pages, page_idx, 2327 DIV_ROUND_UP(dm->size, PAGE_SIZE)); 2328 2329 return 0; 2330 2331 err_dealloc: 2332 mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size); 2333 2334 return err; 2335 } 2336 2337 static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx, 2338 struct mlx5_ib_dm *dm, 2339 struct ib_dm_alloc_attr *attr, 2340 struct uverbs_attr_bundle *attrs, 2341 int type) 2342 { 2343 struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm; 2344 u64 act_size; 2345 int err; 2346 2347 /* Allocation size must a multiple of the basic block size 2348 * and a power of 2. 
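 * round_up() below takes care of the block-size multiple and
 * roundup_pow_of_two() of the power-of-two requirement.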
2349 */ 2350 act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev)); 2351 act_size = roundup_pow_of_two(act_size); 2352 2353 dm->size = act_size; 2354 err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size, 2355 to_mucontext(ctx)->devx_uid, &dm->dev_addr, 2356 &dm->icm_dm.obj_id); 2357 if (err) 2358 return err; 2359 2360 err = uverbs_copy_to(attrs, 2361 MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET, 2362 &dm->dev_addr, sizeof(dm->dev_addr)); 2363 if (err) 2364 mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size, 2365 to_mucontext(ctx)->devx_uid, 2366 dm->dev_addr, dm->icm_dm.obj_id); 2367 2368 return err; 2369 } 2370 2371 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev, 2372 struct ib_ucontext *context, 2373 struct ib_dm_alloc_attr *attr, 2374 struct uverbs_attr_bundle *attrs) 2375 { 2376 struct mlx5_ib_dm *dm; 2377 enum mlx5_ib_uapi_dm_type type; 2378 int err; 2379 2380 err = uverbs_get_const_default(&type, attrs, 2381 MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE, 2382 MLX5_IB_UAPI_DM_TYPE_MEMIC); 2383 if (err) 2384 return ERR_PTR(err); 2385 2386 mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n", 2387 type, attr->length, attr->alignment); 2388 2389 err = check_dm_type_support(to_mdev(ibdev), type); 2390 if (err) 2391 return ERR_PTR(err); 2392 2393 dm = kzalloc(sizeof(*dm), GFP_KERNEL); 2394 if (!dm) 2395 return ERR_PTR(-ENOMEM); 2396 2397 dm->type = type; 2398 2399 switch (type) { 2400 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 2401 err = handle_alloc_dm_memic(context, dm, 2402 attr, 2403 attrs); 2404 break; 2405 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2406 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2407 err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type); 2408 break; 2409 default: 2410 err = -EOPNOTSUPP; 2411 } 2412 2413 if (err) 2414 goto err_free; 2415 2416 return &dm->ibdm; 2417 2418 err_free: 2419 kfree(dm); 2420 return ERR_PTR(err); 2421 } 2422 2423 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) 2424 { 2425 struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context( 2426 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); 2427 struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm; 2428 struct mlx5_ib_dm *dm = to_mdm(ibdm); 2429 u32 page_idx; 2430 int ret; 2431 2432 switch (dm->type) { 2433 case MLX5_IB_UAPI_DM_TYPE_MEMIC: 2434 ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size); 2435 if (ret) 2436 return ret; 2437 2438 page_idx = (dm->dev_addr - 2439 pci_resource_start(dm_db->dev->pdev, 0) - 2440 MLX5_CAP64_DEV_MEM(dm_db->dev, 2441 memic_bar_start_addr)) >> 2442 PAGE_SHIFT; 2443 bitmap_clear(ctx->dm_pages, page_idx, 2444 DIV_ROUND_UP(dm->size, PAGE_SIZE)); 2445 break; 2446 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM: 2447 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM: 2448 ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size, 2449 ctx->devx_uid, dm->dev_addr, 2450 dm->icm_dm.obj_id); 2451 if (ret) 2452 return ret; 2453 break; 2454 default: 2455 return -EOPNOTSUPP; 2456 } 2457 2458 kfree(dm); 2459 2460 return 0; 2461 } 2462 2463 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 2464 { 2465 struct mlx5_ib_pd *pd = to_mpd(ibpd); 2466 struct ib_device *ibdev = ibpd->device; 2467 struct mlx5_ib_alloc_pd_resp resp; 2468 int err; 2469 u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; 2470 u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; 2471 u16 uid = 0; 2472 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( 2473 udata, struct mlx5_ib_ucontext, ibucontext); 2474 2475 uid = context ? 
context->devx_uid : 0; 2476 MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); 2477 MLX5_SET(alloc_pd_in, in, uid, uid); 2478 err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in), 2479 out, sizeof(out)); 2480 if (err) 2481 return err; 2482 2483 pd->pdn = MLX5_GET(alloc_pd_out, out, pd); 2484 pd->uid = uid; 2485 if (udata) { 2486 resp.pdn = pd->pdn; 2487 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 2488 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); 2489 return -EFAULT; 2490 } 2491 } 2492 2493 return 0; 2494 } 2495 2496 static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) 2497 { 2498 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 2499 struct mlx5_ib_pd *mpd = to_mpd(pd); 2500 2501 mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); 2502 } 2503 2504 enum { 2505 MATCH_CRITERIA_ENABLE_OUTER_BIT, 2506 MATCH_CRITERIA_ENABLE_MISC_BIT, 2507 MATCH_CRITERIA_ENABLE_INNER_BIT, 2508 MATCH_CRITERIA_ENABLE_MISC2_BIT 2509 }; 2510 2511 #define HEADER_IS_ZERO(match_criteria, headers) \ 2512 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 2513 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 2514 2515 static u8 get_match_criteria_enable(u32 *match_criteria) 2516 { 2517 u8 match_criteria_enable; 2518 2519 match_criteria_enable = 2520 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 2521 MATCH_CRITERIA_ENABLE_OUTER_BIT; 2522 match_criteria_enable |= 2523 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 2524 MATCH_CRITERIA_ENABLE_MISC_BIT; 2525 match_criteria_enable |= 2526 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 2527 MATCH_CRITERIA_ENABLE_INNER_BIT; 2528 match_criteria_enable |= 2529 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) << 2530 MATCH_CRITERIA_ENABLE_MISC2_BIT; 2531 2532 return match_criteria_enable; 2533 } 2534 2535 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 2536 { 2537 u8 entry_mask; 2538 u8 entry_val; 2539 int err = 0; 2540 2541 if (!mask) 2542 goto out; 2543 2544 entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c, 2545 ip_protocol); 2546 entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v, 2547 ip_protocol); 2548 if (!entry_mask) { 2549 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 2550 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 2551 goto out; 2552 } 2553 /* Don't override existing ip protocol */ 2554 if (mask != entry_mask || val != entry_val) 2555 err = -EINVAL; 2556 out: 2557 return err; 2558 } 2559 2560 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val, 2561 bool inner) 2562 { 2563 if (inner) { 2564 MLX5_SET(fte_match_set_misc, 2565 misc_c, inner_ipv6_flow_label, mask); 2566 MLX5_SET(fte_match_set_misc, 2567 misc_v, inner_ipv6_flow_label, val); 2568 } else { 2569 MLX5_SET(fte_match_set_misc, 2570 misc_c, outer_ipv6_flow_label, mask); 2571 MLX5_SET(fte_match_set_misc, 2572 misc_v, outer_ipv6_flow_label, val); 2573 } 2574 } 2575 2576 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) 2577 { 2578 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); 2579 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val); 2580 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2); 2581 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2); 2582 } 2583 2584 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask) 2585 { 2586 if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) && 2587 !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL)) 2588 return -EOPNOTSUPP; 2589 2590 if 
(MLX5_GET(fte_match_mpls, set_mask, mpls_exp) && 2591 !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP)) 2592 return -EOPNOTSUPP; 2593 2594 if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) && 2595 !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS)) 2596 return -EOPNOTSUPP; 2597 2598 if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) && 2599 !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL)) 2600 return -EOPNOTSUPP; 2601 2602 return 0; 2603 } 2604 2605 #define LAST_ETH_FIELD vlan_tag 2606 #define LAST_IB_FIELD sl 2607 #define LAST_IPV4_FIELD tos 2608 #define LAST_IPV6_FIELD traffic_class 2609 #define LAST_TCP_UDP_FIELD src_port 2610 #define LAST_TUNNEL_FIELD tunnel_id 2611 #define LAST_FLOW_TAG_FIELD tag_id 2612 #define LAST_DROP_FIELD size 2613 #define LAST_COUNTERS_FIELD counters 2614 2615 /* Field is the last supported field */ 2616 #define FIELDS_NOT_SUPPORTED(filter, field)\ 2617 memchr_inv((void *)&filter.field +\ 2618 sizeof(filter.field), 0,\ 2619 sizeof(filter) -\ 2620 offsetof(typeof(filter), field) -\ 2621 sizeof(filter.field)) 2622 2623 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, 2624 bool is_egress, 2625 struct mlx5_flow_act *action) 2626 { 2627 2628 switch (maction->ib_action.type) { 2629 case IB_FLOW_ACTION_ESP: 2630 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 2631 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)) 2632 return -EINVAL; 2633 /* Currently only AES_GCM keymat is supported by the driver */ 2634 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx; 2635 action->action |= is_egress ? 2636 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT : 2637 MLX5_FLOW_CONTEXT_ACTION_DECRYPT; 2638 return 0; 2639 case IB_FLOW_ACTION_UNSPECIFIED: 2640 if (maction->flow_action_raw.sub_type == 2641 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) { 2642 if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 2643 return -EINVAL; 2644 action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 2645 action->modify_id = maction->flow_action_raw.action_id; 2646 return 0; 2647 } 2648 if (maction->flow_action_raw.sub_type == 2649 MLX5_IB_FLOW_ACTION_DECAP) { 2650 if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 2651 return -EINVAL; 2652 action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 2653 return 0; 2654 } 2655 if (maction->flow_action_raw.sub_type == 2656 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) { 2657 if (action->action & 2658 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) 2659 return -EINVAL; 2660 action->action |= 2661 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; 2662 action->reformat_id = 2663 maction->flow_action_raw.action_id; 2664 return 0; 2665 } 2666 /* fall through */ 2667 default: 2668 return -EOPNOTSUPP; 2669 } 2670 } 2671 2672 static int parse_flow_attr(struct mlx5_core_dev *mdev, 2673 struct mlx5_flow_spec *spec, 2674 const union ib_flow_spec *ib_spec, 2675 const struct ib_flow_attr *flow_attr, 2676 struct mlx5_flow_act *action, u32 prev_type) 2677 { 2678 struct mlx5_flow_context *flow_context = &spec->flow_context; 2679 u32 *match_c = spec->match_criteria; 2680 u32 *match_v = spec->match_value; 2681 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 2682 misc_parameters); 2683 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 2684 misc_parameters); 2685 void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c, 2686 misc_parameters_2); 2687 void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v, 2688 misc_parameters_2); 2689 void *headers_c; 2690 void *headers_v; 2691 int match_ipv; 2692 int ret; 2693 2694 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 2695 headers_c = 
MLX5_ADDR_OF(fte_match_param, match_c, 2696 inner_headers); 2697 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 2698 inner_headers); 2699 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2700 ft_field_support.inner_ip_version); 2701 } else { 2702 headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 2703 outer_headers); 2704 headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 2705 outer_headers); 2706 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2707 ft_field_support.outer_ip_version); 2708 } 2709 2710 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { 2711 case IB_FLOW_SPEC_ETH: 2712 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 2713 return -EOPNOTSUPP; 2714 2715 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2716 dmac_47_16), 2717 ib_spec->eth.mask.dst_mac); 2718 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2719 dmac_47_16), 2720 ib_spec->eth.val.dst_mac); 2721 2722 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2723 smac_47_16), 2724 ib_spec->eth.mask.src_mac); 2725 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2726 smac_47_16), 2727 ib_spec->eth.val.src_mac); 2728 2729 if (ib_spec->eth.mask.vlan_tag) { 2730 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2731 cvlan_tag, 1); 2732 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2733 cvlan_tag, 1); 2734 2735 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2736 first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 2737 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2738 first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 2739 2740 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2741 first_cfi, 2742 ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 2743 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2744 first_cfi, 2745 ntohs(ib_spec->eth.val.vlan_tag) >> 12); 2746 2747 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2748 first_prio, 2749 ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 2750 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2751 first_prio, 2752 ntohs(ib_spec->eth.val.vlan_tag) >> 13); 2753 } 2754 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2755 ethertype, ntohs(ib_spec->eth.mask.ether_type)); 2756 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2757 ethertype, ntohs(ib_spec->eth.val.ether_type)); 2758 break; 2759 case IB_FLOW_SPEC_IPV4: 2760 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 2761 return -EOPNOTSUPP; 2762 2763 if (match_ipv) { 2764 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2765 ip_version, 0xf); 2766 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2767 ip_version, MLX5_FS_IPV4_VERSION); 2768 } else { 2769 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2770 ethertype, 0xffff); 2771 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2772 ethertype, ETH_P_IP); 2773 } 2774 2775 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2776 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2777 &ib_spec->ipv4.mask.src_ip, 2778 sizeof(ib_spec->ipv4.mask.src_ip)); 2779 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2780 src_ipv4_src_ipv6.ipv4_layout.ipv4), 2781 &ib_spec->ipv4.val.src_ip, 2782 sizeof(ib_spec->ipv4.val.src_ip)); 2783 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2784 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2785 &ib_spec->ipv4.mask.dst_ip, 2786 sizeof(ib_spec->ipv4.mask.dst_ip)); 2787 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2788 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 2789 &ib_spec->ipv4.val.dst_ip, 2790 sizeof(ib_spec->ipv4.val.dst_ip)); 2791 2792 set_tos(headers_c, headers_v, 2793 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); 2794 2795 if (set_proto(headers_c, headers_v, 2796 
ib_spec->ipv4.mask.proto, 2797 ib_spec->ipv4.val.proto)) 2798 return -EINVAL; 2799 break; 2800 case IB_FLOW_SPEC_IPV6: 2801 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) 2802 return -EOPNOTSUPP; 2803 2804 if (match_ipv) { 2805 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2806 ip_version, 0xf); 2807 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2808 ip_version, MLX5_FS_IPV6_VERSION); 2809 } else { 2810 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 2811 ethertype, 0xffff); 2812 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 2813 ethertype, ETH_P_IPV6); 2814 } 2815 2816 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2817 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2818 &ib_spec->ipv6.mask.src_ip, 2819 sizeof(ib_spec->ipv6.mask.src_ip)); 2820 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2821 src_ipv4_src_ipv6.ipv6_layout.ipv6), 2822 &ib_spec->ipv6.val.src_ip, 2823 sizeof(ib_spec->ipv6.val.src_ip)); 2824 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, 2825 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2826 &ib_spec->ipv6.mask.dst_ip, 2827 sizeof(ib_spec->ipv6.mask.dst_ip)); 2828 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, 2829 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 2830 &ib_spec->ipv6.val.dst_ip, 2831 sizeof(ib_spec->ipv6.val.dst_ip)); 2832 2833 set_tos(headers_c, headers_v, 2834 ib_spec->ipv6.mask.traffic_class, 2835 ib_spec->ipv6.val.traffic_class); 2836 2837 if (set_proto(headers_c, headers_v, 2838 ib_spec->ipv6.mask.next_hdr, 2839 ib_spec->ipv6.val.next_hdr)) 2840 return -EINVAL; 2841 2842 set_flow_label(misc_params_c, misc_params_v, 2843 ntohl(ib_spec->ipv6.mask.flow_label), 2844 ntohl(ib_spec->ipv6.val.flow_label), 2845 ib_spec->type & IB_FLOW_SPEC_INNER); 2846 break; 2847 case IB_FLOW_SPEC_ESP: 2848 if (ib_spec->esp.mask.seq) 2849 return -EOPNOTSUPP; 2850 2851 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 2852 ntohl(ib_spec->esp.mask.spi)); 2853 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 2854 ntohl(ib_spec->esp.val.spi)); 2855 break; 2856 case IB_FLOW_SPEC_TCP: 2857 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 2858 LAST_TCP_UDP_FIELD)) 2859 return -EOPNOTSUPP; 2860 2861 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP)) 2862 return -EINVAL; 2863 2864 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport, 2865 ntohs(ib_spec->tcp_udp.mask.src_port)); 2866 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, 2867 ntohs(ib_spec->tcp_udp.val.src_port)); 2868 2869 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, 2870 ntohs(ib_spec->tcp_udp.mask.dst_port)); 2871 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 2872 ntohs(ib_spec->tcp_udp.val.dst_port)); 2873 break; 2874 case IB_FLOW_SPEC_UDP: 2875 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 2876 LAST_TCP_UDP_FIELD)) 2877 return -EOPNOTSUPP; 2878 2879 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP)) 2880 return -EINVAL; 2881 2882 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, 2883 ntohs(ib_spec->tcp_udp.mask.src_port)); 2884 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, 2885 ntohs(ib_spec->tcp_udp.val.src_port)); 2886 2887 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, 2888 ntohs(ib_spec->tcp_udp.mask.dst_port)); 2889 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 2890 ntohs(ib_spec->tcp_udp.val.dst_port)); 2891 break; 2892 case IB_FLOW_SPEC_GRE: 2893 if (ib_spec->gre.mask.c_ks_res0_ver) 2894 return -EOPNOTSUPP; 2895 2896 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE)) 2897 return -EINVAL; 2898 2899 MLX5_SET(fte_match_set_lyr_2_4, 
headers_c, ip_protocol, 2900 0xff); 2901 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 2902 IPPROTO_GRE); 2903 2904 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol, 2905 ntohs(ib_spec->gre.mask.protocol)); 2906 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol, 2907 ntohs(ib_spec->gre.val.protocol)); 2908 2909 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, 2910 gre_key.nvgre.hi), 2911 &ib_spec->gre.mask.key, 2912 sizeof(ib_spec->gre.mask.key)); 2913 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v, 2914 gre_key.nvgre.hi), 2915 &ib_spec->gre.val.key, 2916 sizeof(ib_spec->gre.val.key)); 2917 break; 2918 case IB_FLOW_SPEC_MPLS: 2919 switch (prev_type) { 2920 case IB_FLOW_SPEC_UDP: 2921 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2922 ft_field_support.outer_first_mpls_over_udp), 2923 &ib_spec->mpls.mask.tag)) 2924 return -EOPNOTSUPP; 2925 2926 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2927 outer_first_mpls_over_udp), 2928 &ib_spec->mpls.val.tag, 2929 sizeof(ib_spec->mpls.val.tag)); 2930 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2931 outer_first_mpls_over_udp), 2932 &ib_spec->mpls.mask.tag, 2933 sizeof(ib_spec->mpls.mask.tag)); 2934 break; 2935 case IB_FLOW_SPEC_GRE: 2936 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2937 ft_field_support.outer_first_mpls_over_gre), 2938 &ib_spec->mpls.mask.tag)) 2939 return -EOPNOTSUPP; 2940 2941 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2942 outer_first_mpls_over_gre), 2943 &ib_spec->mpls.val.tag, 2944 sizeof(ib_spec->mpls.val.tag)); 2945 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2946 outer_first_mpls_over_gre), 2947 &ib_spec->mpls.mask.tag, 2948 sizeof(ib_spec->mpls.mask.tag)); 2949 break; 2950 default: 2951 if (ib_spec->type & IB_FLOW_SPEC_INNER) { 2952 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2953 ft_field_support.inner_first_mpls), 2954 &ib_spec->mpls.mask.tag)) 2955 return -EOPNOTSUPP; 2956 2957 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2958 inner_first_mpls), 2959 &ib_spec->mpls.val.tag, 2960 sizeof(ib_spec->mpls.val.tag)); 2961 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2962 inner_first_mpls), 2963 &ib_spec->mpls.mask.tag, 2964 sizeof(ib_spec->mpls.mask.tag)); 2965 } else { 2966 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 2967 ft_field_support.outer_first_mpls), 2968 &ib_spec->mpls.mask.tag)) 2969 return -EOPNOTSUPP; 2970 2971 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v, 2972 outer_first_mpls), 2973 &ib_spec->mpls.val.tag, 2974 sizeof(ib_spec->mpls.val.tag)); 2975 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c, 2976 outer_first_mpls), 2977 &ib_spec->mpls.mask.tag, 2978 sizeof(ib_spec->mpls.mask.tag)); 2979 } 2980 } 2981 break; 2982 case IB_FLOW_SPEC_VXLAN_TUNNEL: 2983 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask, 2984 LAST_TUNNEL_FIELD)) 2985 return -EOPNOTSUPP; 2986 2987 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni, 2988 ntohl(ib_spec->tunnel.mask.tunnel_id)); 2989 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni, 2990 ntohl(ib_spec->tunnel.val.tunnel_id)); 2991 break; 2992 case IB_FLOW_SPEC_ACTION_TAG: 2993 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag, 2994 LAST_FLOW_TAG_FIELD)) 2995 return -EOPNOTSUPP; 2996 if (ib_spec->flow_tag.tag_id >= BIT(24)) 2997 return -EINVAL; 2998 2999 flow_context->flow_tag = ib_spec->flow_tag.tag_id; 3000 flow_context->flags |= FLOW_CONTEXT_HAS_TAG; 3001 break; 3002 case IB_FLOW_SPEC_ACTION_DROP: 3003 
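		/* Note: a drop action overrides any forwarding destination;
		 * unless a counter is also attached, _create_flow_rule()
		 * clears the destination list for such rules.
		 */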
if (FIELDS_NOT_SUPPORTED(ib_spec->drop, 3004 LAST_DROP_FIELD)) 3005 return -EOPNOTSUPP; 3006 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 3007 break; 3008 case IB_FLOW_SPEC_ACTION_HANDLE: 3009 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act), 3010 flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action); 3011 if (ret) 3012 return ret; 3013 break; 3014 case IB_FLOW_SPEC_ACTION_COUNT: 3015 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count, 3016 LAST_COUNTERS_FIELD)) 3017 return -EOPNOTSUPP; 3018 3019 /* for now support only one counters spec per flow */ 3020 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 3021 return -EINVAL; 3022 3023 action->counters = ib_spec->flow_count.counters; 3024 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 3025 break; 3026 default: 3027 return -EINVAL; 3028 } 3029 3030 return 0; 3031 } 3032 3033 /* If a flow could catch both multicast and unicast packets, 3034 * it won't fall into the multicast flow steering table and this rule 3035 * could steal other multicast packets. 3036 */ 3037 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr) 3038 { 3039 union ib_flow_spec *flow_spec; 3040 3041 if (ib_attr->type != IB_FLOW_ATTR_NORMAL || 3042 ib_attr->num_of_specs < 1) 3043 return false; 3044 3045 flow_spec = (union ib_flow_spec *)(ib_attr + 1); 3046 if (flow_spec->type == IB_FLOW_SPEC_IPV4) { 3047 struct ib_flow_spec_ipv4 *ipv4_spec; 3048 3049 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec; 3050 if (ipv4_is_multicast(ipv4_spec->val.dst_ip)) 3051 return true; 3052 3053 return false; 3054 } 3055 3056 if (flow_spec->type == IB_FLOW_SPEC_ETH) { 3057 struct ib_flow_spec_eth *eth_spec; 3058 3059 eth_spec = (struct ib_flow_spec_eth *)flow_spec; 3060 return is_multicast_ether_addr(eth_spec->mask.dst_mac) && 3061 is_multicast_ether_addr(eth_spec->val.dst_mac); 3062 } 3063 3064 return false; 3065 } 3066 3067 enum valid_spec { 3068 VALID_SPEC_INVALID, 3069 VALID_SPEC_VALID, 3070 VALID_SPEC_NA, 3071 }; 3072 3073 static enum valid_spec 3074 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev, 3075 const struct mlx5_flow_spec *spec, 3076 const struct mlx5_flow_act *flow_act, 3077 bool egress) 3078 { 3079 const u32 *match_c = spec->match_criteria; 3080 bool is_crypto = 3081 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 3082 MLX5_FLOW_CONTEXT_ACTION_DECRYPT)); 3083 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c); 3084 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP; 3085 3086 /* 3087 * Currently only crypto is supported in egress, when regular egress 3088 * rules would be supported, always return VALID_SPEC_NA. 3089 */ 3090 if (!is_crypto) 3091 return VALID_SPEC_NA; 3092 3093 return is_crypto && is_ipsec && 3094 (!egress || (!is_drop && 3095 !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ? 3096 VALID_SPEC_VALID : VALID_SPEC_INVALID; 3097 } 3098 3099 static bool is_valid_spec(struct mlx5_core_dev *mdev, 3100 const struct mlx5_flow_spec *spec, 3101 const struct mlx5_flow_act *flow_act, 3102 bool egress) 3103 { 3104 /* We curretly only support ipsec egress flow */ 3105 return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID; 3106 } 3107 3108 static bool is_valid_ethertype(struct mlx5_core_dev *mdev, 3109 const struct ib_flow_attr *flow_attr, 3110 bool check_inner) 3111 { 3112 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1); 3113 int match_ipv = check_inner ? 
3114 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 3115 ft_field_support.inner_ip_version) : 3116 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, 3117 ft_field_support.outer_ip_version); 3118 int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0; 3119 bool ipv4_spec_valid, ipv6_spec_valid; 3120 unsigned int ip_spec_type = 0; 3121 bool has_ethertype = false; 3122 unsigned int spec_index; 3123 bool mask_valid = true; 3124 u16 eth_type = 0; 3125 bool type_valid; 3126 3127 /* Validate that ethertype is correct */ 3128 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 3129 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) && 3130 ib_spec->eth.mask.ether_type) { 3131 mask_valid = (ib_spec->eth.mask.ether_type == 3132 htons(0xffff)); 3133 has_ethertype = true; 3134 eth_type = ntohs(ib_spec->eth.val.ether_type); 3135 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) || 3136 (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) { 3137 ip_spec_type = ib_spec->type; 3138 } 3139 ib_spec = (void *)ib_spec + ib_spec->size; 3140 } 3141 3142 type_valid = (!has_ethertype) || (!ip_spec_type); 3143 if (!type_valid && mask_valid) { 3144 ipv4_spec_valid = (eth_type == ETH_P_IP) && 3145 (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit)); 3146 ipv6_spec_valid = (eth_type == ETH_P_IPV6) && 3147 (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit)); 3148 3149 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) || 3150 (((eth_type == ETH_P_MPLS_UC) || 3151 (eth_type == ETH_P_MPLS_MC)) && match_ipv); 3152 } 3153 3154 return type_valid; 3155 } 3156 3157 static bool is_valid_attr(struct mlx5_core_dev *mdev, 3158 const struct ib_flow_attr *flow_attr) 3159 { 3160 return is_valid_ethertype(mdev, flow_attr, false) && 3161 is_valid_ethertype(mdev, flow_attr, true); 3162 } 3163 3164 static void put_flow_table(struct mlx5_ib_dev *dev, 3165 struct mlx5_ib_flow_prio *prio, bool ft_added) 3166 { 3167 prio->refcount -= !!ft_added; 3168 if (!prio->refcount) { 3169 mlx5_destroy_flow_table(prio->flow_table); 3170 prio->flow_table = NULL; 3171 } 3172 } 3173 3174 static void counters_clear_description(struct ib_counters *counters) 3175 { 3176 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 3177 3178 mutex_lock(&mcounters->mcntrs_mutex); 3179 kfree(mcounters->counters_data); 3180 mcounters->counters_data = NULL; 3181 mcounters->cntrs_max_index = 0; 3182 mutex_unlock(&mcounters->mcntrs_mutex); 3183 } 3184 3185 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 3186 { 3187 struct mlx5_ib_flow_handler *handler = container_of(flow_id, 3188 struct mlx5_ib_flow_handler, 3189 ibflow); 3190 struct mlx5_ib_flow_handler *iter, *tmp; 3191 struct mlx5_ib_dev *dev = handler->dev; 3192 3193 mutex_lock(&dev->flow_db->lock); 3194 3195 list_for_each_entry_safe(iter, tmp, &handler->list, list) { 3196 mlx5_del_flow_rules(iter->rule); 3197 put_flow_table(dev, iter->prio, true); 3198 list_del(&iter->list); 3199 kfree(iter); 3200 } 3201 3202 mlx5_del_flow_rules(handler->rule); 3203 put_flow_table(dev, handler->prio, true); 3204 if (handler->ibcounters && 3205 atomic_read(&handler->ibcounters->usecnt) == 1) 3206 counters_clear_description(handler->ibcounters); 3207 3208 mutex_unlock(&dev->flow_db->lock); 3209 if (handler->flow_matcher) 3210 atomic_dec(&handler->flow_matcher->usecnt); 3211 kfree(handler); 3212 3213 return 0; 3214 } 3215 3216 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) 3217 { 3218 priority *= 2; 3219 if (!dont_trap) 3220 priority++; 3221 return priority; 3222 } 3223 3224 enum flow_table_type { 
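	/* Selects whether get_flow_table() uses the RX (bypass) or TX (egress)
	 * flow namespace; sniffer flows map to SNIFFER_RX/SNIFFER_TX instead.
	 */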
3225 MLX5_IB_FT_RX, 3226 MLX5_IB_FT_TX 3227 }; 3228 3229 #define MLX5_FS_MAX_TYPES 6 3230 #define MLX5_FS_MAX_ENTRIES BIT(16) 3231 3232 static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns, 3233 struct mlx5_ib_flow_prio *prio, 3234 int priority, 3235 int num_entries, int num_groups, 3236 u32 flags) 3237 { 3238 struct mlx5_flow_table *ft; 3239 3240 ft = mlx5_create_auto_grouped_flow_table(ns, priority, 3241 num_entries, 3242 num_groups, 3243 0, flags); 3244 if (IS_ERR(ft)) 3245 return ERR_CAST(ft); 3246 3247 prio->flow_table = ft; 3248 prio->refcount = 0; 3249 return prio; 3250 } 3251 3252 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, 3253 struct ib_flow_attr *flow_attr, 3254 enum flow_table_type ft_type) 3255 { 3256 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; 3257 struct mlx5_flow_namespace *ns = NULL; 3258 struct mlx5_ib_flow_prio *prio; 3259 struct mlx5_flow_table *ft; 3260 int max_table_size; 3261 int num_entries; 3262 int num_groups; 3263 bool esw_encap; 3264 u32 flags = 0; 3265 int priority; 3266 3267 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3268 log_max_ft_size)); 3269 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 3270 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 3271 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 3272 enum mlx5_flow_namespace_type fn_type; 3273 3274 if (flow_is_multicast_only(flow_attr) && 3275 !dont_trap) 3276 priority = MLX5_IB_FLOW_MCAST_PRIO; 3277 else 3278 priority = ib_prio_to_core_prio(flow_attr->priority, 3279 dont_trap); 3280 if (ft_type == MLX5_IB_FT_RX) { 3281 fn_type = MLX5_FLOW_NAMESPACE_BYPASS; 3282 prio = &dev->flow_db->prios[priority]; 3283 if (!dev->is_rep && !esw_encap && 3284 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap)) 3285 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 3286 if (!dev->is_rep && !esw_encap && 3287 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3288 reformat_l3_tunnel_to_l2)) 3289 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3290 } else { 3291 max_table_size = 3292 BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, 3293 log_max_ft_size)); 3294 fn_type = MLX5_FLOW_NAMESPACE_EGRESS; 3295 prio = &dev->flow_db->egress_prios[priority]; 3296 if (!dev->is_rep && !esw_encap && 3297 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) 3298 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3299 } 3300 ns = mlx5_get_flow_namespace(dev->mdev, fn_type); 3301 num_entries = MLX5_FS_MAX_ENTRIES; 3302 num_groups = MLX5_FS_MAX_TYPES; 3303 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3304 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3305 ns = mlx5_get_flow_namespace(dev->mdev, 3306 MLX5_FLOW_NAMESPACE_LEFTOVERS); 3307 build_leftovers_ft_param(&priority, 3308 &num_entries, 3309 &num_groups); 3310 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; 3311 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3312 if (!MLX5_CAP_FLOWTABLE(dev->mdev, 3313 allow_sniffer_and_nic_rx_shared_tir)) 3314 return ERR_PTR(-ENOTSUPP); 3315 3316 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ? 
3317 MLX5_FLOW_NAMESPACE_SNIFFER_RX : 3318 MLX5_FLOW_NAMESPACE_SNIFFER_TX); 3319 3320 prio = &dev->flow_db->sniffer[ft_type]; 3321 priority = 0; 3322 num_entries = 1; 3323 num_groups = 1; 3324 } 3325 3326 if (!ns) 3327 return ERR_PTR(-ENOTSUPP); 3328 3329 max_table_size = min_t(int, num_entries, max_table_size); 3330 3331 ft = prio->flow_table; 3332 if (!ft) 3333 return _get_prio(ns, prio, priority, max_table_size, num_groups, 3334 flags); 3335 3336 return prio; 3337 } 3338 3339 static void set_underlay_qp(struct mlx5_ib_dev *dev, 3340 struct mlx5_flow_spec *spec, 3341 u32 underlay_qpn) 3342 { 3343 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, 3344 spec->match_criteria, 3345 misc_parameters); 3346 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3347 misc_parameters); 3348 3349 if (underlay_qpn && 3350 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3351 ft_field_support.bth_dst_qp)) { 3352 MLX5_SET(fte_match_set_misc, 3353 misc_params_v, bth_dst_qp, underlay_qpn); 3354 MLX5_SET(fte_match_set_misc, 3355 misc_params_c, bth_dst_qp, 0xffffff); 3356 } 3357 } 3358 3359 static int read_flow_counters(struct ib_device *ibdev, 3360 struct mlx5_read_counters_attr *read_attr) 3361 { 3362 struct mlx5_fc *fc = read_attr->hw_cntrs_hndl; 3363 struct mlx5_ib_dev *dev = to_mdev(ibdev); 3364 3365 return mlx5_fc_query(dev->mdev, fc, 3366 &read_attr->out[IB_COUNTER_PACKETS], 3367 &read_attr->out[IB_COUNTER_BYTES]); 3368 } 3369 3370 /* flow counters currently expose two counters packets and bytes */ 3371 #define FLOW_COUNTERS_NUM 2 3372 static int counters_set_description(struct ib_counters *counters, 3373 enum mlx5_ib_counters_type counters_type, 3374 struct mlx5_ib_flow_counters_desc *desc_data, 3375 u32 ncounters) 3376 { 3377 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 3378 u32 cntrs_max_index = 0; 3379 int i; 3380 3381 if (counters_type != MLX5_IB_COUNTERS_FLOW) 3382 return -EINVAL; 3383 3384 /* init the fields for the object */ 3385 mcounters->type = counters_type; 3386 mcounters->read_counters = read_flow_counters; 3387 mcounters->counters_num = FLOW_COUNTERS_NUM; 3388 mcounters->ncounters = ncounters; 3389 /* each counter entry have both description and index pair */ 3390 for (i = 0; i < ncounters; i++) { 3391 if (desc_data[i].description > IB_COUNTER_BYTES) 3392 return -EINVAL; 3393 3394 if (cntrs_max_index <= desc_data[i].index) 3395 cntrs_max_index = desc_data[i].index + 1; 3396 } 3397 3398 mutex_lock(&mcounters->mcntrs_mutex); 3399 mcounters->counters_data = desc_data; 3400 mcounters->cntrs_max_index = cntrs_max_index; 3401 mutex_unlock(&mcounters->mcntrs_mutex); 3402 3403 return 0; 3404 } 3405 3406 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2)) 3407 static int flow_counters_set_data(struct ib_counters *ibcounters, 3408 struct mlx5_ib_create_flow *ucmd) 3409 { 3410 struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters); 3411 struct mlx5_ib_flow_counters_data *cntrs_data = NULL; 3412 struct mlx5_ib_flow_counters_desc *desc_data = NULL; 3413 bool hw_hndl = false; 3414 int ret = 0; 3415 3416 if (ucmd && ucmd->ncounters_data != 0) { 3417 cntrs_data = ucmd->data; 3418 if (cntrs_data->ncounters > MAX_COUNTERS_NUM) 3419 return -EINVAL; 3420 3421 desc_data = kcalloc(cntrs_data->ncounters, 3422 sizeof(*desc_data), 3423 GFP_KERNEL); 3424 if (!desc_data) 3425 return -ENOMEM; 3426 3427 if (copy_from_user(desc_data, 3428 u64_to_user_ptr(cntrs_data->counters_data), 3429 sizeof(*desc_data) * cntrs_data->ncounters)) { 3430 ret = -EFAULT; 3431 goto free; 3432 } 
3433 } 3434 3435 if (!mcounters->hw_cntrs_hndl) { 3436 mcounters->hw_cntrs_hndl = mlx5_fc_create( 3437 to_mdev(ibcounters->device)->mdev, false); 3438 if (IS_ERR(mcounters->hw_cntrs_hndl)) { 3439 ret = PTR_ERR(mcounters->hw_cntrs_hndl); 3440 goto free; 3441 } 3442 hw_hndl = true; 3443 } 3444 3445 if (desc_data) { 3446 /* counters already bound to at least one flow */ 3447 if (mcounters->cntrs_max_index) { 3448 ret = -EINVAL; 3449 goto free_hndl; 3450 } 3451 3452 ret = counters_set_description(ibcounters, 3453 MLX5_IB_COUNTERS_FLOW, 3454 desc_data, 3455 cntrs_data->ncounters); 3456 if (ret) 3457 goto free_hndl; 3458 3459 } else if (!mcounters->cntrs_max_index) { 3460 /* counters not bound yet, must have udata passed */ 3461 ret = -EINVAL; 3462 goto free_hndl; 3463 } 3464 3465 return 0; 3466 3467 free_hndl: 3468 if (hw_hndl) { 3469 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev, 3470 mcounters->hw_cntrs_hndl); 3471 mcounters->hw_cntrs_hndl = NULL; 3472 } 3473 free: 3474 kfree(desc_data); 3475 return ret; 3476 } 3477 3478 static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev, 3479 struct mlx5_flow_spec *spec, 3480 struct mlx5_eswitch_rep *rep) 3481 { 3482 struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; 3483 void *misc; 3484 3485 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { 3486 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3487 misc_parameters_2); 3488 3489 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, 3490 mlx5_eswitch_get_vport_metadata_for_match(esw, 3491 rep->vport)); 3492 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 3493 misc_parameters_2); 3494 3495 MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); 3496 } else { 3497 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 3498 misc_parameters); 3499 3500 MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport); 3501 3502 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 3503 misc_parameters); 3504 3505 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); 3506 } 3507 } 3508 3509 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, 3510 struct mlx5_ib_flow_prio *ft_prio, 3511 const struct ib_flow_attr *flow_attr, 3512 struct mlx5_flow_destination *dst, 3513 u32 underlay_qpn, 3514 struct mlx5_ib_create_flow *ucmd) 3515 { 3516 struct mlx5_flow_table *ft = ft_prio->flow_table; 3517 struct mlx5_ib_flow_handler *handler; 3518 struct mlx5_flow_act flow_act = {}; 3519 struct mlx5_flow_spec *spec; 3520 struct mlx5_flow_destination dest_arr[2] = {}; 3521 struct mlx5_flow_destination *rule_dst = dest_arr; 3522 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 3523 unsigned int spec_index; 3524 u32 prev_type = 0; 3525 int err = 0; 3526 int dest_num = 0; 3527 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3528 3529 if (!is_valid_attr(dev->mdev, flow_attr)) 3530 return ERR_PTR(-EINVAL); 3531 3532 if (dev->is_rep && is_egress) 3533 return ERR_PTR(-EINVAL); 3534 3535 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 3536 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 3537 if (!handler || !spec) { 3538 err = -ENOMEM; 3539 goto free; 3540 } 3541 3542 INIT_LIST_HEAD(&handler->list); 3543 if (dst) { 3544 memcpy(&dest_arr[0], dst, sizeof(*dst)); 3545 dest_num++; 3546 } 3547 3548 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 3549 err = parse_flow_attr(dev->mdev, spec, 3550 ib_flow, flow_attr, &flow_act, 3551 prev_type); 3552 if (err < 0) 3553 goto free; 3554 3555 prev_type = ((union 
ib_flow_spec *)ib_flow)->type; 3556 ib_flow += ((union ib_flow_spec *)ib_flow)->size; 3557 } 3558 3559 if (!flow_is_multicast_only(flow_attr)) 3560 set_underlay_qp(dev, spec, underlay_qpn); 3561 3562 if (dev->is_rep) { 3563 struct mlx5_eswitch_rep *rep; 3564 3565 rep = dev->port[flow_attr->port - 1].rep; 3566 if (!rep) { 3567 err = -EINVAL; 3568 goto free; 3569 } 3570 3571 mlx5_ib_set_rule_source_port(dev, spec, rep); 3572 } 3573 3574 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 3575 3576 if (is_egress && 3577 !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) { 3578 err = -EINVAL; 3579 goto free; 3580 } 3581 3582 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 3583 struct mlx5_ib_mcounters *mcounters; 3584 3585 err = flow_counters_set_data(flow_act.counters, ucmd); 3586 if (err) 3587 goto free; 3588 3589 mcounters = to_mcounters(flow_act.counters); 3590 handler->ibcounters = flow_act.counters; 3591 dest_arr[dest_num].type = 3592 MLX5_FLOW_DESTINATION_TYPE_COUNTER; 3593 dest_arr[dest_num].counter_id = 3594 mlx5_fc_id(mcounters->hw_cntrs_hndl); 3595 dest_num++; 3596 } 3597 3598 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) { 3599 if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) { 3600 rule_dst = NULL; 3601 dest_num = 0; 3602 } 3603 } else { 3604 if (is_egress) 3605 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 3606 else 3607 flow_act.action |= 3608 dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : 3609 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 3610 } 3611 3612 if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) && 3613 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3614 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 3615 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", 3616 spec->flow_context.flow_tag, flow_attr->type); 3617 err = -EINVAL; 3618 goto free; 3619 } 3620 handler->rule = mlx5_add_flow_rules(ft, spec, 3621 &flow_act, 3622 rule_dst, dest_num); 3623 3624 if (IS_ERR(handler->rule)) { 3625 err = PTR_ERR(handler->rule); 3626 goto free; 3627 } 3628 3629 ft_prio->refcount++; 3630 handler->prio = ft_prio; 3631 handler->dev = dev; 3632 3633 ft_prio->flow_table = ft; 3634 free: 3635 if (err && handler) { 3636 if (handler->ibcounters && 3637 atomic_read(&handler->ibcounters->usecnt) == 1) 3638 counters_clear_description(handler->ibcounters); 3639 kfree(handler); 3640 } 3641 kvfree(spec); 3642 return err ? 
ERR_PTR(err) : handler; 3643 } 3644 3645 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, 3646 struct mlx5_ib_flow_prio *ft_prio, 3647 const struct ib_flow_attr *flow_attr, 3648 struct mlx5_flow_destination *dst) 3649 { 3650 return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL); 3651 } 3652 3653 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 3654 struct mlx5_ib_flow_prio *ft_prio, 3655 struct ib_flow_attr *flow_attr, 3656 struct mlx5_flow_destination *dst) 3657 { 3658 struct mlx5_ib_flow_handler *handler_dst = NULL; 3659 struct mlx5_ib_flow_handler *handler = NULL; 3660 3661 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); 3662 if (!IS_ERR(handler)) { 3663 handler_dst = create_flow_rule(dev, ft_prio, 3664 flow_attr, dst); 3665 if (IS_ERR(handler_dst)) { 3666 mlx5_del_flow_rules(handler->rule); 3667 ft_prio->refcount--; 3668 kfree(handler); 3669 handler = handler_dst; 3670 } else { 3671 list_add(&handler_dst->list, &handler->list); 3672 } 3673 } 3674 3675 return handler; 3676 } 3677 enum { 3678 LEFTOVERS_MC, 3679 LEFTOVERS_UC, 3680 }; 3681 3682 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 3683 struct mlx5_ib_flow_prio *ft_prio, 3684 struct ib_flow_attr *flow_attr, 3685 struct mlx5_flow_destination *dst) 3686 { 3687 struct mlx5_ib_flow_handler *handler_ucast = NULL; 3688 struct mlx5_ib_flow_handler *handler = NULL; 3689 3690 static struct { 3691 struct ib_flow_attr flow_attr; 3692 struct ib_flow_spec_eth eth_flow; 3693 } leftovers_specs[] = { 3694 [LEFTOVERS_MC] = { 3695 .flow_attr = { 3696 .num_of_specs = 1, 3697 .size = sizeof(leftovers_specs[0]) 3698 }, 3699 .eth_flow = { 3700 .type = IB_FLOW_SPEC_ETH, 3701 .size = sizeof(struct ib_flow_spec_eth), 3702 .mask = {.dst_mac = {0x1} }, 3703 .val = {.dst_mac = {0x1} } 3704 } 3705 }, 3706 [LEFTOVERS_UC] = { 3707 .flow_attr = { 3708 .num_of_specs = 1, 3709 .size = sizeof(leftovers_specs[0]) 3710 }, 3711 .eth_flow = { 3712 .type = IB_FLOW_SPEC_ETH, 3713 .size = sizeof(struct ib_flow_spec_eth), 3714 .mask = {.dst_mac = {0x1} }, 3715 .val = {.dst_mac = {} } 3716 } 3717 } 3718 }; 3719 3720 handler = create_flow_rule(dev, ft_prio, 3721 &leftovers_specs[LEFTOVERS_MC].flow_attr, 3722 dst); 3723 if (!IS_ERR(handler) && 3724 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 3725 handler_ucast = create_flow_rule(dev, ft_prio, 3726 &leftovers_specs[LEFTOVERS_UC].flow_attr, 3727 dst); 3728 if (IS_ERR(handler_ucast)) { 3729 mlx5_del_flow_rules(handler->rule); 3730 ft_prio->refcount--; 3731 kfree(handler); 3732 handler = handler_ucast; 3733 } else { 3734 list_add(&handler_ucast->list, &handler->list); 3735 } 3736 } 3737 3738 return handler; 3739 } 3740 3741 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 3742 struct mlx5_ib_flow_prio *ft_rx, 3743 struct mlx5_ib_flow_prio *ft_tx, 3744 struct mlx5_flow_destination *dst) 3745 { 3746 struct mlx5_ib_flow_handler *handler_rx; 3747 struct mlx5_ib_flow_handler *handler_tx; 3748 int err; 3749 static const struct ib_flow_attr flow_attr = { 3750 .num_of_specs = 0, 3751 .size = sizeof(flow_attr) 3752 }; 3753 3754 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 3755 if (IS_ERR(handler_rx)) { 3756 err = PTR_ERR(handler_rx); 3757 goto err; 3758 } 3759 3760 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 3761 if (IS_ERR(handler_tx)) { 3762 err = PTR_ERR(handler_tx); 3763 goto err_tx; 3764 } 3765 3766 list_add(&handler_tx->list, &handler_rx->list); 3767 
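	/* Chain the TX rule onto the RX handler's list so that
	 * mlx5_ib_destroy_flow() tears down both rules when the sniffer
	 * flow is destroyed.
	 */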
3768 return handler_rx; 3769 3770 err_tx: 3771 mlx5_del_flow_rules(handler_rx->rule); 3772 ft_rx->refcount--; 3773 kfree(handler_rx); 3774 err: 3775 return ERR_PTR(err); 3776 } 3777 3778 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 3779 struct ib_flow_attr *flow_attr, 3780 int domain, 3781 struct ib_udata *udata) 3782 { 3783 struct mlx5_ib_dev *dev = to_mdev(qp->device); 3784 struct mlx5_ib_qp *mqp = to_mqp(qp); 3785 struct mlx5_ib_flow_handler *handler = NULL; 3786 struct mlx5_flow_destination *dst = NULL; 3787 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 3788 struct mlx5_ib_flow_prio *ft_prio; 3789 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS; 3790 struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr; 3791 size_t min_ucmd_sz, required_ucmd_sz; 3792 int err; 3793 int underlay_qpn; 3794 3795 if (udata && udata->inlen) { 3796 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) + 3797 sizeof(ucmd_hdr.reserved); 3798 if (udata->inlen < min_ucmd_sz) 3799 return ERR_PTR(-EOPNOTSUPP); 3800 3801 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz); 3802 if (err) 3803 return ERR_PTR(err); 3804 3805 /* currently supports only one counters data */ 3806 if (ucmd_hdr.ncounters_data > 1) 3807 return ERR_PTR(-EINVAL); 3808 3809 required_ucmd_sz = min_ucmd_sz + 3810 sizeof(struct mlx5_ib_flow_counters_data) * 3811 ucmd_hdr.ncounters_data; 3812 if (udata->inlen > required_ucmd_sz && 3813 !ib_is_udata_cleared(udata, required_ucmd_sz, 3814 udata->inlen - required_ucmd_sz)) 3815 return ERR_PTR(-EOPNOTSUPP); 3816 3817 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL); 3818 if (!ucmd) 3819 return ERR_PTR(-ENOMEM); 3820 3821 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); 3822 if (err) 3823 goto free_ucmd; 3824 } 3825 3826 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) { 3827 err = -ENOMEM; 3828 goto free_ucmd; 3829 } 3830 3831 if (domain != IB_FLOW_DOMAIN_USER || 3832 flow_attr->port > dev->num_ports || 3833 (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | 3834 IB_FLOW_ATTR_FLAGS_EGRESS))) { 3835 err = -EINVAL; 3836 goto free_ucmd; 3837 } 3838 3839 if (is_egress && 3840 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3841 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { 3842 err = -EINVAL; 3843 goto free_ucmd; 3844 } 3845 3846 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 3847 if (!dst) { 3848 err = -ENOMEM; 3849 goto free_ucmd; 3850 } 3851 3852 mutex_lock(&dev->flow_db->lock); 3853 3854 ft_prio = get_flow_table(dev, flow_attr, 3855 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX); 3856 if (IS_ERR(ft_prio)) { 3857 err = PTR_ERR(ft_prio); 3858 goto unlock; 3859 } 3860 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3861 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 3862 if (IS_ERR(ft_prio_tx)) { 3863 err = PTR_ERR(ft_prio_tx); 3864 ft_prio_tx = NULL; 3865 goto destroy_ft; 3866 } 3867 } 3868 3869 if (is_egress) { 3870 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT; 3871 } else { 3872 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 3873 if (mqp->flags & MLX5_IB_QP_RSS) 3874 dst->tir_num = mqp->rss_qp.tirn; 3875 else 3876 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 3877 } 3878 3879 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 3880 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 3881 handler = create_dont_trap_rule(dev, ft_prio, 3882 flow_attr, dst); 3883 } else { 3884 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ? 
3885 mqp->underlay_qpn : 0; 3886 handler = _create_flow_rule(dev, ft_prio, flow_attr, 3887 dst, underlay_qpn, ucmd); 3888 } 3889 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 3890 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 3891 handler = create_leftovers_rule(dev, ft_prio, flow_attr, 3892 dst); 3893 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 3894 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 3895 } else { 3896 err = -EINVAL; 3897 goto destroy_ft; 3898 } 3899 3900 if (IS_ERR(handler)) { 3901 err = PTR_ERR(handler); 3902 handler = NULL; 3903 goto destroy_ft; 3904 } 3905 3906 mutex_unlock(&dev->flow_db->lock); 3907 kfree(dst); 3908 kfree(ucmd); 3909 3910 return &handler->ibflow; 3911 3912 destroy_ft: 3913 put_flow_table(dev, ft_prio, false); 3914 if (ft_prio_tx) 3915 put_flow_table(dev, ft_prio_tx, false); 3916 unlock: 3917 mutex_unlock(&dev->flow_db->lock); 3918 kfree(dst); 3919 free_ucmd: 3920 kfree(ucmd); 3921 return ERR_PTR(err); 3922 } 3923 3924 static struct mlx5_ib_flow_prio * 3925 _get_flow_table(struct mlx5_ib_dev *dev, 3926 struct mlx5_ib_flow_matcher *fs_matcher, 3927 bool mcast) 3928 { 3929 struct mlx5_flow_namespace *ns = NULL; 3930 struct mlx5_ib_flow_prio *prio = NULL; 3931 int max_table_size = 0; 3932 bool esw_encap; 3933 u32 flags = 0; 3934 int priority; 3935 3936 if (mcast) 3937 priority = MLX5_IB_FLOW_MCAST_PRIO; 3938 else 3939 priority = ib_prio_to_core_prio(fs_matcher->priority, false); 3940 3941 esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != 3942 DEVLINK_ESWITCH_ENCAP_MODE_NONE; 3943 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) { 3944 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3945 log_max_ft_size)); 3946 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap) 3947 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 3948 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, 3949 reformat_l3_tunnel_to_l2) && 3950 !esw_encap) 3951 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3952 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) { 3953 max_table_size = BIT( 3954 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size)); 3955 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap) 3956 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3957 } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) { 3958 max_table_size = BIT( 3959 MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size)); 3960 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap) 3961 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; 3962 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) && 3963 esw_encap) 3964 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 3965 priority = FDB_BYPASS_PATH; 3966 } 3967 3968 max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); 3969 3970 ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type); 3971 if (!ns) 3972 return ERR_PTR(-ENOTSUPP); 3973 3974 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) 3975 prio = &dev->flow_db->prios[priority]; 3976 else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) 3977 prio = &dev->flow_db->egress_prios[priority]; 3978 else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) 3979 prio = &dev->flow_db->fdb; 3980 3981 if (!prio) 3982 return ERR_PTR(-EINVAL); 3983 3984 if (prio->flow_table) 3985 return prio; 3986 3987 return _get_prio(ns, prio, priority, max_table_size, 3988 MLX5_FS_MAX_TYPES, flags); 3989 } 3990 3991 static struct mlx5_ib_flow_handler * 3992 _create_raw_flow_rule(struct mlx5_ib_dev *dev, 3993 struct 
mlx5_ib_flow_prio *ft_prio, 3994 struct mlx5_flow_destination *dst, 3995 struct mlx5_ib_flow_matcher *fs_matcher, 3996 struct mlx5_flow_context *flow_context, 3997 struct mlx5_flow_act *flow_act, 3998 void *cmd_in, int inlen, 3999 int dst_num) 4000 { 4001 struct mlx5_ib_flow_handler *handler; 4002 struct mlx5_flow_spec *spec; 4003 struct mlx5_flow_table *ft = ft_prio->flow_table; 4004 int err = 0; 4005 4006 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 4007 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 4008 if (!handler || !spec) { 4009 err = -ENOMEM; 4010 goto free; 4011 } 4012 4013 INIT_LIST_HEAD(&handler->list); 4014 4015 memcpy(spec->match_value, cmd_in, inlen); 4016 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, 4017 fs_matcher->mask_len); 4018 spec->match_criteria_enable = fs_matcher->match_criteria_enable; 4019 spec->flow_context = *flow_context; 4020 4021 handler->rule = mlx5_add_flow_rules(ft, spec, 4022 flow_act, dst, dst_num); 4023 4024 if (IS_ERR(handler->rule)) { 4025 err = PTR_ERR(handler->rule); 4026 goto free; 4027 } 4028 4029 ft_prio->refcount++; 4030 handler->prio = ft_prio; 4031 handler->dev = dev; 4032 ft_prio->flow_table = ft; 4033 4034 free: 4035 if (err) 4036 kfree(handler); 4037 kvfree(spec); 4038 return err ? ERR_PTR(err) : handler; 4039 } 4040 4041 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher, 4042 void *match_v) 4043 { 4044 void *match_c; 4045 void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4; 4046 void *dmac, *dmac_mask; 4047 void *ipv4, *ipv4_mask; 4048 4049 if (!(fs_matcher->match_criteria_enable & 4050 (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT))) 4051 return false; 4052 4053 match_c = fs_matcher->matcher_mask.match_params; 4054 match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v, 4055 outer_headers); 4056 match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c, 4057 outer_headers); 4058 4059 dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 4060 dmac_47_16); 4061 dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 4062 dmac_47_16); 4063 4064 if (is_multicast_ether_addr(dmac) && 4065 is_multicast_ether_addr(dmac_mask)) 4066 return true; 4067 4068 ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4, 4069 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 4070 4071 ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4, 4072 dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 4073 4074 if (ipv4_is_multicast(*(__be32 *)(ipv4)) && 4075 ipv4_is_multicast(*(__be32 *)(ipv4_mask))) 4076 return true; 4077 4078 return false; 4079 } 4080 4081 struct mlx5_ib_flow_handler * 4082 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, 4083 struct mlx5_ib_flow_matcher *fs_matcher, 4084 struct mlx5_flow_context *flow_context, 4085 struct mlx5_flow_act *flow_act, 4086 u32 counter_id, 4087 void *cmd_in, int inlen, int dest_id, 4088 int dest_type) 4089 { 4090 struct mlx5_flow_destination *dst; 4091 struct mlx5_ib_flow_prio *ft_prio; 4092 struct mlx5_ib_flow_handler *handler; 4093 int dst_num = 0; 4094 bool mcast; 4095 int err; 4096 4097 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL) 4098 return ERR_PTR(-EOPNOTSUPP); 4099 4100 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) 4101 return ERR_PTR(-ENOMEM); 4102 4103 dst = kcalloc(2, sizeof(*dst), GFP_KERNEL); 4104 if (!dst) 4105 return ERR_PTR(-ENOMEM); 4106 4107 mcast = raw_fs_is_multicast(fs_matcher, cmd_in); 4108 mutex_lock(&dev->flow_db->lock); 4109 4110 ft_prio = _get_flow_table(dev, fs_matcher, mcast); 4111 if (IS_ERR(ft_prio)) { 4112 err = 
PTR_ERR(ft_prio); 4113 goto unlock; 4114 } 4115 4116 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) { 4117 dst[dst_num].type = dest_type; 4118 dst[dst_num].tir_num = dest_id; 4119 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 4120 } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { 4121 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; 4122 dst[dst_num].ft_num = dest_id; 4123 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 4124 } else { 4125 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT; 4126 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 4127 } 4128 4129 dst_num++; 4130 4131 if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 4132 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 4133 dst[dst_num].counter_id = counter_id; 4134 dst_num++; 4135 } 4136 4137 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, 4138 flow_context, flow_act, 4139 cmd_in, inlen, dst_num); 4140 4141 if (IS_ERR(handler)) { 4142 err = PTR_ERR(handler); 4143 goto destroy_ft; 4144 } 4145 4146 mutex_unlock(&dev->flow_db->lock); 4147 atomic_inc(&fs_matcher->usecnt); 4148 handler->flow_matcher = fs_matcher; 4149 4150 kfree(dst); 4151 4152 return handler; 4153 4154 destroy_ft: 4155 put_flow_table(dev, ft_prio, false); 4156 unlock: 4157 mutex_unlock(&dev->flow_db->lock); 4158 kfree(dst); 4159 4160 return ERR_PTR(err); 4161 } 4162 4163 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags) 4164 { 4165 u32 flags = 0; 4166 4167 if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA) 4168 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA; 4169 4170 return flags; 4171 } 4172 4173 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA 4174 static struct ib_flow_action * 4175 mlx5_ib_create_flow_action_esp(struct ib_device *device, 4176 const struct ib_flow_action_attrs_esp *attr, 4177 struct uverbs_attr_bundle *attrs) 4178 { 4179 struct mlx5_ib_dev *mdev = to_mdev(device); 4180 struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm; 4181 struct mlx5_accel_esp_xfrm_attrs accel_attrs = {}; 4182 struct mlx5_ib_flow_action *action; 4183 u64 action_flags; 4184 u64 flags; 4185 int err = 0; 4186 4187 err = uverbs_get_flags64( 4188 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 4189 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1)); 4190 if (err) 4191 return ERR_PTR(err); 4192 4193 flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags); 4194 4195 /* We current only support a subset of the standard features. Only a 4196 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn 4197 * (with overlap). Full offload mode isn't supported. 
4198 */ 4199 if (!attr->keymat || attr->replay || attr->encap || 4200 attr->spi || attr->seq || attr->tfc_pad || 4201 attr->hard_limit_pkts || 4202 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 4203 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT))) 4204 return ERR_PTR(-EOPNOTSUPP); 4205 4206 if (attr->keymat->protocol != 4207 IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM) 4208 return ERR_PTR(-EOPNOTSUPP); 4209 4210 aes_gcm = &attr->keymat->keymat.aes_gcm; 4211 4212 if (aes_gcm->icv_len != 16 || 4213 aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) 4214 return ERR_PTR(-EOPNOTSUPP); 4215 4216 action = kmalloc(sizeof(*action), GFP_KERNEL); 4217 if (!action) 4218 return ERR_PTR(-ENOMEM); 4219 4220 action->esp_aes_gcm.ib_flags = attr->flags; 4221 memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key, 4222 sizeof(accel_attrs.keymat.aes_gcm.aes_key)); 4223 accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8; 4224 memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt, 4225 sizeof(accel_attrs.keymat.aes_gcm.salt)); 4226 memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv, 4227 sizeof(accel_attrs.keymat.aes_gcm.seq_iv)); 4228 accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8; 4229 accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ; 4230 accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; 4231 4232 accel_attrs.esn = attr->esn; 4233 if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) 4234 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; 4235 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) 4236 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; 4237 4238 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT) 4239 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT; 4240 4241 action->esp_aes_gcm.ctx = 4242 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags); 4243 if (IS_ERR(action->esp_aes_gcm.ctx)) { 4244 err = PTR_ERR(action->esp_aes_gcm.ctx); 4245 goto err_parse; 4246 } 4247 4248 action->esp_aes_gcm.ib_flags = attr->flags; 4249 4250 return &action->ib_action; 4251 4252 err_parse: 4253 kfree(action); 4254 return ERR_PTR(err); 4255 } 4256 4257 static int 4258 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action, 4259 const struct ib_flow_action_attrs_esp *attr, 4260 struct uverbs_attr_bundle *attrs) 4261 { 4262 struct mlx5_ib_flow_action *maction = to_mflow_act(action); 4263 struct mlx5_accel_esp_xfrm_attrs accel_attrs; 4264 int err = 0; 4265 4266 if (attr->keymat || attr->replay || attr->encap || 4267 attr->spi || attr->seq || attr->tfc_pad || 4268 attr->hard_limit_pkts || 4269 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | 4270 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS | 4271 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))) 4272 return -EOPNOTSUPP; 4273 4274 /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can 4275 * be modified. 
 */
    if (!(maction->esp_aes_gcm.ib_flags &
          IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
        attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
                       IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
        return -EINVAL;

    memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
           sizeof(accel_attrs));

    accel_attrs.esn = attr->esn;
    if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
        accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
    else
        accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;

    err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
                                     &accel_attrs);
    if (err)
        return err;

    maction->esp_aes_gcm.ib_flags &=
        ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
    maction->esp_aes_gcm.ib_flags |=
        attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;

    return 0;
}

static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
{
    struct mlx5_ib_flow_action *maction = to_mflow_act(action);

    switch (action->type) {
    case IB_FLOW_ACTION_ESP:
        /*
         * We only support aes_gcm for now, so we implicitly know this is
         * the underlying crypto.
         */
        mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
        break;
    case IB_FLOW_ACTION_UNSPECIFIED:
        mlx5_ib_destroy_flow_action_raw(maction);
        break;
    default:
        WARN_ON(true);
        break;
    }

    kfree(maction);
    return 0;
}

static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
    struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
    struct mlx5_ib_qp *mqp = to_mqp(ibqp);
    int err;
    u16 uid;

    uid = ibqp->pd ?
        to_mpd(ibqp->pd)->uid : 0;

    if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
        mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
        return -EOPNOTSUPP;
    }

    err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
    if (err)
        mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
                     ibqp->qp_num, gid->raw);

    return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
    struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
    int err;
    u16 uid;

    uid = ibqp->pd ?
4359 to_mpd(ibqp->pd)->uid : 0; 4360 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid); 4361 if (err) 4362 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", 4363 ibqp->qp_num, gid->raw); 4364 4365 return err; 4366 } 4367 4368 static int init_node_data(struct mlx5_ib_dev *dev) 4369 { 4370 int err; 4371 4372 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); 4373 if (err) 4374 return err; 4375 4376 dev->mdev->rev_id = dev->mdev->pdev->revision; 4377 4378 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); 4379 } 4380 4381 static ssize_t fw_pages_show(struct device *device, 4382 struct device_attribute *attr, char *buf) 4383 { 4384 struct mlx5_ib_dev *dev = 4385 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4386 4387 return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages); 4388 } 4389 static DEVICE_ATTR_RO(fw_pages); 4390 4391 static ssize_t reg_pages_show(struct device *device, 4392 struct device_attribute *attr, char *buf) 4393 { 4394 struct mlx5_ib_dev *dev = 4395 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4396 4397 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); 4398 } 4399 static DEVICE_ATTR_RO(reg_pages); 4400 4401 static ssize_t hca_type_show(struct device *device, 4402 struct device_attribute *attr, char *buf) 4403 { 4404 struct mlx5_ib_dev *dev = 4405 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4406 4407 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 4408 } 4409 static DEVICE_ATTR_RO(hca_type); 4410 4411 static ssize_t hw_rev_show(struct device *device, 4412 struct device_attribute *attr, char *buf) 4413 { 4414 struct mlx5_ib_dev *dev = 4415 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4416 4417 return sprintf(buf, "%x\n", dev->mdev->rev_id); 4418 } 4419 static DEVICE_ATTR_RO(hw_rev); 4420 4421 static ssize_t board_id_show(struct device *device, 4422 struct device_attribute *attr, char *buf) 4423 { 4424 struct mlx5_ib_dev *dev = 4425 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); 4426 4427 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 4428 dev->mdev->board_id); 4429 } 4430 static DEVICE_ATTR_RO(board_id); 4431 4432 static struct attribute *mlx5_class_attributes[] = { 4433 &dev_attr_hw_rev.attr, 4434 &dev_attr_hca_type.attr, 4435 &dev_attr_board_id.attr, 4436 &dev_attr_fw_pages.attr, 4437 &dev_attr_reg_pages.attr, 4438 NULL, 4439 }; 4440 4441 static const struct attribute_group mlx5_attr_group = { 4442 .attrs = mlx5_class_attributes, 4443 }; 4444 4445 static void pkey_change_handler(struct work_struct *work) 4446 { 4447 struct mlx5_ib_port_resources *ports = 4448 container_of(work, struct mlx5_ib_port_resources, 4449 pkey_change_work); 4450 4451 mutex_lock(&ports->devr->mutex); 4452 mlx5_ib_gsi_pkey_change(ports->gsi); 4453 mutex_unlock(&ports->devr->mutex); 4454 } 4455 4456 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) 4457 { 4458 struct mlx5_ib_qp *mqp; 4459 struct mlx5_ib_cq *send_mcq, *recv_mcq; 4460 struct mlx5_core_cq *mcq; 4461 struct list_head cq_armed_list; 4462 unsigned long flags_qp; 4463 unsigned long flags_cq; 4464 unsigned long flags; 4465 4466 INIT_LIST_HEAD(&cq_armed_list); 4467 4468 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 4469 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 4470 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 4471 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 4472 if (mqp->sq.tail != mqp->sq.head) { 4473 send_mcq = 
to_mcq(mqp->ibqp.send_cq);
            spin_lock_irqsave(&send_mcq->lock, flags_cq);
            if (send_mcq->mcq.comp &&
                mqp->ibqp.send_cq->comp_handler) {
                if (!send_mcq->mcq.reset_notify_added) {
                    send_mcq->mcq.reset_notify_added = 1;
                    list_add_tail(&send_mcq->mcq.reset_notify,
                                  &cq_armed_list);
                }
            }
            spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
        }
        spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
        spin_lock_irqsave(&mqp->rq.lock, flags_qp);
        /* no handling is needed for SRQ */
        if (!mqp->ibqp.srq) {
            if (mqp->rq.tail != mqp->rq.head) {
                recv_mcq = to_mcq(mqp->ibqp.recv_cq);
                spin_lock_irqsave(&recv_mcq->lock, flags_cq);
                if (recv_mcq->mcq.comp &&
                    mqp->ibqp.recv_cq->comp_handler) {
                    if (!recv_mcq->mcq.reset_notify_added) {
                        recv_mcq->mcq.reset_notify_added = 1;
                        list_add_tail(&recv_mcq->mcq.reset_notify,
                                      &cq_armed_list);
                    }
                }
                spin_unlock_irqrestore(&recv_mcq->lock,
                                       flags_cq);
            }
        }
        spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
    }
    /* At this point all in-flight post-send work has been serialized against
     * by the lock/unlock pairs above; now arm all the CQs that were collected
     * on the list so their completion handlers run.
     */
    list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
        mcq->comp(mcq, NULL);
    }
    spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

static void delay_drop_handler(struct work_struct *work)
{
    int err;
    struct mlx5_ib_delay_drop *delay_drop =
        container_of(work, struct mlx5_ib_delay_drop,
                     delay_drop_work);

    atomic_inc(&delay_drop->events_cnt);

    mutex_lock(&delay_drop->lock);
    err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
                                   delay_drop->timeout);
    if (err) {
        mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
                     delay_drop->timeout);
        delay_drop->activate = false;
    }
    mutex_unlock(&delay_drop->lock);
}

static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
                                 struct ib_event *ibev)
{
    u8 port = (eqe->data.port.port >> 4) & 0xf;

    switch (eqe->sub_type) {
    case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
        if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
            IB_LINK_LAYER_ETHERNET)
            schedule_work(&ibdev->delay_drop.delay_drop_work);
        break;
    default: /* do nothing */
        return;
    }
}

static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
                              struct ib_event *ibev)
{
    u8 port = (eqe->data.port.port >> 4) & 0xf;

    ibev->element.port_num = port;

    switch (eqe->sub_type) {
    case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
    case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
    case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
        /* In RoCE, port up/down events are handled in
         * mlx5_netdev_event().
         */
        if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
            IB_LINK_LAYER_ETHERNET)
            return -EINVAL;

        ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4570 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 4571 break; 4572 4573 case MLX5_PORT_CHANGE_SUBTYPE_LID: 4574 ibev->event = IB_EVENT_LID_CHANGE; 4575 break; 4576 4577 case MLX5_PORT_CHANGE_SUBTYPE_PKEY: 4578 ibev->event = IB_EVENT_PKEY_CHANGE; 4579 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 4580 break; 4581 4582 case MLX5_PORT_CHANGE_SUBTYPE_GUID: 4583 ibev->event = IB_EVENT_GID_CHANGE; 4584 break; 4585 4586 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: 4587 ibev->event = IB_EVENT_CLIENT_REREGISTER; 4588 break; 4589 default: 4590 return -EINVAL; 4591 } 4592 4593 return 0; 4594 } 4595 4596 static void mlx5_ib_handle_event(struct work_struct *_work) 4597 { 4598 struct mlx5_ib_event_work *work = 4599 container_of(_work, struct mlx5_ib_event_work, work); 4600 struct mlx5_ib_dev *ibdev; 4601 struct ib_event ibev; 4602 bool fatal = false; 4603 4604 if (work->is_slave) { 4605 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi); 4606 if (!ibdev) 4607 goto out; 4608 } else { 4609 ibdev = work->dev; 4610 } 4611 4612 switch (work->event) { 4613 case MLX5_DEV_EVENT_SYS_ERROR: 4614 ibev.event = IB_EVENT_DEVICE_FATAL; 4615 mlx5_ib_handle_internal_error(ibdev); 4616 ibev.element.port_num = (u8)(unsigned long)work->param; 4617 fatal = true; 4618 break; 4619 case MLX5_EVENT_TYPE_PORT_CHANGE: 4620 if (handle_port_change(ibdev, work->param, &ibev)) 4621 goto out; 4622 break; 4623 case MLX5_EVENT_TYPE_GENERAL_EVENT: 4624 handle_general_event(ibdev, work->param, &ibev); 4625 /* fall through */ 4626 default: 4627 goto out; 4628 } 4629 4630 ibev.device = &ibdev->ib_dev; 4631 4632 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) { 4633 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num); 4634 goto out; 4635 } 4636 4637 if (ibdev->ib_active) 4638 ib_dispatch_event(&ibev); 4639 4640 if (fatal) 4641 ibdev->ib_active = false; 4642 out: 4643 kfree(work); 4644 } 4645 4646 static int mlx5_ib_event(struct notifier_block *nb, 4647 unsigned long event, void *param) 4648 { 4649 struct mlx5_ib_event_work *work; 4650 4651 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4652 if (!work) 4653 return NOTIFY_DONE; 4654 4655 INIT_WORK(&work->work, mlx5_ib_handle_event); 4656 work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events); 4657 work->is_slave = false; 4658 work->param = param; 4659 work->event = event; 4660 4661 queue_work(mlx5_ib_event_wq, &work->work); 4662 4663 return NOTIFY_OK; 4664 } 4665 4666 static int mlx5_ib_event_slave_port(struct notifier_block *nb, 4667 unsigned long event, void *param) 4668 { 4669 struct mlx5_ib_event_work *work; 4670 4671 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4672 if (!work) 4673 return NOTIFY_DONE; 4674 4675 INIT_WORK(&work->work, mlx5_ib_handle_event); 4676 work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events); 4677 work->is_slave = true; 4678 work->param = param; 4679 work->event = event; 4680 queue_work(mlx5_ib_event_wq, &work->work); 4681 4682 return NOTIFY_OK; 4683 } 4684 4685 static int set_has_smi_cap(struct mlx5_ib_dev *dev) 4686 { 4687 struct mlx5_hca_vport_context vport_ctx; 4688 int err; 4689 int port; 4690 4691 for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) { 4692 dev->mdev->port_caps[port - 1].has_smi = false; 4693 if (MLX5_CAP_GEN(dev->mdev, port_type) == 4694 MLX5_CAP_PORT_TYPE_IB) { 4695 if (MLX5_CAP_GEN(dev->mdev, ib_virt)) { 4696 err = mlx5_query_hca_vport_context(dev->mdev, 0, 4697 port, 0, 4698 &vport_ctx); 4699 if (err) { 4700 mlx5_ib_err(dev, "query_hca_vport_context for port=%d 
failed %d\n", 4701 port, err); 4702 return err; 4703 } 4704 dev->mdev->port_caps[port - 1].has_smi = 4705 vport_ctx.has_smi; 4706 } else { 4707 dev->mdev->port_caps[port - 1].has_smi = true; 4708 } 4709 } 4710 } 4711 return 0; 4712 } 4713 4714 static void get_ext_port_caps(struct mlx5_ib_dev *dev) 4715 { 4716 int port; 4717 4718 for (port = 1; port <= dev->num_ports; port++) 4719 mlx5_query_ext_port_caps(dev, port); 4720 } 4721 4722 static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port) 4723 { 4724 struct ib_device_attr *dprops = NULL; 4725 struct ib_port_attr *pprops = NULL; 4726 int err = -ENOMEM; 4727 struct ib_udata uhw = {.inlen = 0, .outlen = 0}; 4728 4729 pprops = kzalloc(sizeof(*pprops), GFP_KERNEL); 4730 if (!pprops) 4731 goto out; 4732 4733 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); 4734 if (!dprops) 4735 goto out; 4736 4737 err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); 4738 if (err) { 4739 mlx5_ib_warn(dev, "query_device failed %d\n", err); 4740 goto out; 4741 } 4742 4743 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 4744 if (err) { 4745 mlx5_ib_warn(dev, "query_port %d failed %d\n", 4746 port, err); 4747 goto out; 4748 } 4749 4750 dev->mdev->port_caps[port - 1].pkey_table_len = 4751 dprops->max_pkeys; 4752 dev->mdev->port_caps[port - 1].gid_table_len = 4753 pprops->gid_tbl_len; 4754 mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n", 4755 port, dprops->max_pkeys, pprops->gid_tbl_len); 4756 4757 out: 4758 kfree(pprops); 4759 kfree(dprops); 4760 4761 return err; 4762 } 4763 4764 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port) 4765 { 4766 /* For representors use port 1, is this is the only native 4767 * port 4768 */ 4769 if (dev->is_rep) 4770 return __get_port_caps(dev, 1); 4771 return __get_port_caps(dev, port); 4772 } 4773 4774 static void destroy_umrc_res(struct mlx5_ib_dev *dev) 4775 { 4776 int err; 4777 4778 err = mlx5_mr_cache_cleanup(dev); 4779 if (err) 4780 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 4781 4782 if (dev->umrc.qp) 4783 mlx5_ib_destroy_qp(dev->umrc.qp, NULL); 4784 if (dev->umrc.cq) 4785 ib_free_cq(dev->umrc.cq); 4786 if (dev->umrc.pd) 4787 ib_dealloc_pd(dev->umrc.pd); 4788 } 4789 4790 enum { 4791 MAX_UMR_WR = 128, 4792 }; 4793 4794 static int create_umr_res(struct mlx5_ib_dev *dev) 4795 { 4796 struct ib_qp_init_attr *init_attr = NULL; 4797 struct ib_qp_attr *attr = NULL; 4798 struct ib_pd *pd; 4799 struct ib_cq *cq; 4800 struct ib_qp *qp; 4801 int ret; 4802 4803 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 4804 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 4805 if (!attr || !init_attr) { 4806 ret = -ENOMEM; 4807 goto error_0; 4808 } 4809 4810 pd = ib_alloc_pd(&dev->ib_dev, 0); 4811 if (IS_ERR(pd)) { 4812 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); 4813 ret = PTR_ERR(pd); 4814 goto error_0; 4815 } 4816 4817 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); 4818 if (IS_ERR(cq)) { 4819 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 4820 ret = PTR_ERR(cq); 4821 goto error_2; 4822 } 4823 4824 init_attr->send_cq = cq; 4825 init_attr->recv_cq = cq; 4826 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 4827 init_attr->cap.max_send_wr = MAX_UMR_WR; 4828 init_attr->cap.max_send_sge = 1; 4829 init_attr->qp_type = MLX5_IB_QPT_REG_UMR; 4830 init_attr->port_num = 1; 4831 qp = mlx5_ib_create_qp(pd, init_attr, NULL); 4832 if (IS_ERR(qp)) { 4833 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); 4834 ret = PTR_ERR(qp); 4835 goto error_3; 4836 } 4837 qp->device = &dev->ib_dev; 4838 
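/* This internal UMR QP is created through the driver path directly, so fill in the ib_qp bookkeeping fields that the uverbs core would otherwise set. */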
qp->real_qp = qp; 4839 qp->uobject = NULL; 4840 qp->qp_type = MLX5_IB_QPT_REG_UMR; 4841 qp->send_cq = init_attr->send_cq; 4842 qp->recv_cq = init_attr->recv_cq; 4843 4844 attr->qp_state = IB_QPS_INIT; 4845 attr->port_num = 1; 4846 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | 4847 IB_QP_PORT, NULL); 4848 if (ret) { 4849 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); 4850 goto error_4; 4851 } 4852 4853 memset(attr, 0, sizeof(*attr)); 4854 attr->qp_state = IB_QPS_RTR; 4855 attr->path_mtu = IB_MTU_256; 4856 4857 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 4858 if (ret) { 4859 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); 4860 goto error_4; 4861 } 4862 4863 memset(attr, 0, sizeof(*attr)); 4864 attr->qp_state = IB_QPS_RTS; 4865 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 4866 if (ret) { 4867 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); 4868 goto error_4; 4869 } 4870 4871 dev->umrc.qp = qp; 4872 dev->umrc.cq = cq; 4873 dev->umrc.pd = pd; 4874 4875 sema_init(&dev->umrc.sem, MAX_UMR_WR); 4876 ret = mlx5_mr_cache_init(dev); 4877 if (ret) { 4878 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); 4879 goto error_4; 4880 } 4881 4882 kfree(attr); 4883 kfree(init_attr); 4884 4885 return 0; 4886 4887 error_4: 4888 mlx5_ib_destroy_qp(qp, NULL); 4889 dev->umrc.qp = NULL; 4890 4891 error_3: 4892 ib_free_cq(cq); 4893 dev->umrc.cq = NULL; 4894 4895 error_2: 4896 ib_dealloc_pd(pd); 4897 dev->umrc.pd = NULL; 4898 4899 error_0: 4900 kfree(attr); 4901 kfree(init_attr); 4902 return ret; 4903 } 4904 4905 static u8 mlx5_get_umr_fence(u8 umr_fence_cap) 4906 { 4907 switch (umr_fence_cap) { 4908 case MLX5_CAP_UMR_FENCE_NONE: 4909 return MLX5_FENCE_MODE_NONE; 4910 case MLX5_CAP_UMR_FENCE_SMALL: 4911 return MLX5_FENCE_MODE_INITIATOR_SMALL; 4912 default: 4913 return MLX5_FENCE_MODE_STRONG_ORDERING; 4914 } 4915 } 4916 4917 static int create_dev_resources(struct mlx5_ib_resources *devr) 4918 { 4919 struct ib_srq_init_attr attr; 4920 struct mlx5_ib_dev *dev; 4921 struct ib_device *ibdev; 4922 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 4923 int port; 4924 int ret = 0; 4925 4926 dev = container_of(devr, struct mlx5_ib_dev, devr); 4927 ibdev = &dev->ib_dev; 4928 4929 mutex_init(&devr->mutex); 4930 4931 devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd); 4932 if (!devr->p0) 4933 return -ENOMEM; 4934 4935 devr->p0->device = ibdev; 4936 devr->p0->uobject = NULL; 4937 atomic_set(&devr->p0->usecnt, 0); 4938 4939 ret = mlx5_ib_alloc_pd(devr->p0, NULL); 4940 if (ret) 4941 goto error0; 4942 4943 devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq); 4944 if (!devr->c0) { 4945 ret = -ENOMEM; 4946 goto error1; 4947 } 4948 4949 devr->c0->device = &dev->ib_dev; 4950 atomic_set(&devr->c0->usecnt, 0); 4951 4952 ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL); 4953 if (ret) 4954 goto err_create_cq; 4955 4956 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); 4957 if (IS_ERR(devr->x0)) { 4958 ret = PTR_ERR(devr->x0); 4959 goto error2; 4960 } 4961 devr->x0->device = &dev->ib_dev; 4962 devr->x0->inode = NULL; 4963 atomic_set(&devr->x0->usecnt, 0); 4964 mutex_init(&devr->x0->tgt_qp_mutex); 4965 INIT_LIST_HEAD(&devr->x0->tgt_qp_list); 4966 4967 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); 4968 if (IS_ERR(devr->x1)) { 4969 ret = PTR_ERR(devr->x1); 4970 goto error3; 4971 } 4972 devr->x1->device = &dev->ib_dev; 4973 devr->x1->inode = NULL; 4974 atomic_set(&devr->x1->usecnt, 0); 4975 mutex_init(&devr->x1->tgt_qp_mutex); 4976 INIT_LIST_HEAD(&devr->x1->tgt_qp_list); 4977 4978 memset(&attr, 0, 
sizeof(attr)); 4979 attr.attr.max_sge = 1; 4980 attr.attr.max_wr = 1; 4981 attr.srq_type = IB_SRQT_XRC; 4982 attr.ext.cq = devr->c0; 4983 attr.ext.xrc.xrcd = devr->x0; 4984 4985 devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq); 4986 if (!devr->s0) { 4987 ret = -ENOMEM; 4988 goto error4; 4989 } 4990 4991 devr->s0->device = &dev->ib_dev; 4992 devr->s0->pd = devr->p0; 4993 devr->s0->srq_type = IB_SRQT_XRC; 4994 devr->s0->ext.xrc.xrcd = devr->x0; 4995 devr->s0->ext.cq = devr->c0; 4996 ret = mlx5_ib_create_srq(devr->s0, &attr, NULL); 4997 if (ret) 4998 goto err_create; 4999 5000 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); 5001 atomic_inc(&devr->s0->ext.cq->usecnt); 5002 atomic_inc(&devr->p0->usecnt); 5003 atomic_set(&devr->s0->usecnt, 0); 5004 5005 memset(&attr, 0, sizeof(attr)); 5006 attr.attr.max_sge = 1; 5007 attr.attr.max_wr = 1; 5008 attr.srq_type = IB_SRQT_BASIC; 5009 devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq); 5010 if (!devr->s1) { 5011 ret = -ENOMEM; 5012 goto error5; 5013 } 5014 5015 devr->s1->device = &dev->ib_dev; 5016 devr->s1->pd = devr->p0; 5017 devr->s1->srq_type = IB_SRQT_BASIC; 5018 devr->s1->ext.cq = devr->c0; 5019 5020 ret = mlx5_ib_create_srq(devr->s1, &attr, NULL); 5021 if (ret) 5022 goto error6; 5023 5024 atomic_inc(&devr->p0->usecnt); 5025 atomic_set(&devr->s1->usecnt, 0); 5026 5027 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { 5028 INIT_WORK(&devr->ports[port].pkey_change_work, 5029 pkey_change_handler); 5030 devr->ports[port].devr = devr; 5031 } 5032 5033 return 0; 5034 5035 error6: 5036 kfree(devr->s1); 5037 error5: 5038 mlx5_ib_destroy_srq(devr->s0, NULL); 5039 err_create: 5040 kfree(devr->s0); 5041 error4: 5042 mlx5_ib_dealloc_xrcd(devr->x1, NULL); 5043 error3: 5044 mlx5_ib_dealloc_xrcd(devr->x0, NULL); 5045 error2: 5046 mlx5_ib_destroy_cq(devr->c0, NULL); 5047 err_create_cq: 5048 kfree(devr->c0); 5049 error1: 5050 mlx5_ib_dealloc_pd(devr->p0, NULL); 5051 error0: 5052 kfree(devr->p0); 5053 return ret; 5054 } 5055 5056 static void destroy_dev_resources(struct mlx5_ib_resources *devr) 5057 { 5058 int port; 5059 5060 mlx5_ib_destroy_srq(devr->s1, NULL); 5061 kfree(devr->s1); 5062 mlx5_ib_destroy_srq(devr->s0, NULL); 5063 kfree(devr->s0); 5064 mlx5_ib_dealloc_xrcd(devr->x0, NULL); 5065 mlx5_ib_dealloc_xrcd(devr->x1, NULL); 5066 mlx5_ib_destroy_cq(devr->c0, NULL); 5067 kfree(devr->c0); 5068 mlx5_ib_dealloc_pd(devr->p0, NULL); 5069 kfree(devr->p0); 5070 5071 /* Make sure no change P_Key work items are still executing */ 5072 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) 5073 cancel_work_sync(&devr->ports[port].pkey_change_work); 5074 } 5075 5076 static u32 get_core_cap_flags(struct ib_device *ibdev, 5077 struct mlx5_hca_vport_context *rep) 5078 { 5079 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5080 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); 5081 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); 5082 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version); 5083 bool raw_support = !mlx5_core_mp_enabled(dev->mdev); 5084 u32 ret = 0; 5085 5086 if (rep->grh_required) 5087 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED; 5088 5089 if (ll == IB_LINK_LAYER_INFINIBAND) 5090 return ret | RDMA_CORE_PORT_IBA_IB; 5091 5092 if (raw_support) 5093 ret |= RDMA_CORE_PORT_RAW_PACKET; 5094 5095 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP)) 5096 return ret; 5097 5098 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP)) 5099 return ret; 5100 5101 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP) 5102 ret |= RDMA_CORE_PORT_IBA_ROCE; 5103 5104 if (roce_version_cap & 
MLX5_ROCE_VERSION_2_CAP) 5105 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 5106 5107 return ret; 5108 } 5109 5110 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, 5111 struct ib_port_immutable *immutable) 5112 { 5113 struct ib_port_attr attr; 5114 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5115 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num); 5116 struct mlx5_hca_vport_context rep = {0}; 5117 int err; 5118 5119 err = ib_query_port(ibdev, port_num, &attr); 5120 if (err) 5121 return err; 5122 5123 if (ll == IB_LINK_LAYER_INFINIBAND) { 5124 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0, 5125 &rep); 5126 if (err) 5127 return err; 5128 } 5129 5130 immutable->pkey_tbl_len = attr.pkey_tbl_len; 5131 immutable->gid_tbl_len = attr.gid_tbl_len; 5132 immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep); 5133 if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce)) 5134 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 5135 5136 return 0; 5137 } 5138 5139 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num, 5140 struct ib_port_immutable *immutable) 5141 { 5142 struct ib_port_attr attr; 5143 int err; 5144 5145 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; 5146 5147 err = ib_query_port(ibdev, port_num, &attr); 5148 if (err) 5149 return err; 5150 5151 immutable->pkey_tbl_len = attr.pkey_tbl_len; 5152 immutable->gid_tbl_len = attr.gid_tbl_len; 5153 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; 5154 5155 return 0; 5156 } 5157 5158 static void get_dev_fw_str(struct ib_device *ibdev, char *str) 5159 { 5160 struct mlx5_ib_dev *dev = 5161 container_of(ibdev, struct mlx5_ib_dev, ib_dev); 5162 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d", 5163 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev), 5164 fw_rev_sub(dev->mdev)); 5165 } 5166 5167 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) 5168 { 5169 struct mlx5_core_dev *mdev = dev->mdev; 5170 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev, 5171 MLX5_FLOW_NAMESPACE_LAG); 5172 struct mlx5_flow_table *ft; 5173 int err; 5174 5175 if (!ns || !mlx5_lag_is_roce(mdev)) 5176 return 0; 5177 5178 err = mlx5_cmd_create_vport_lag(mdev); 5179 if (err) 5180 return err; 5181 5182 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0); 5183 if (IS_ERR(ft)) { 5184 err = PTR_ERR(ft); 5185 goto err_destroy_vport_lag; 5186 } 5187 5188 dev->flow_db->lag_demux_ft = ft; 5189 dev->lag_active = true; 5190 return 0; 5191 5192 err_destroy_vport_lag: 5193 mlx5_cmd_destroy_vport_lag(mdev); 5194 return err; 5195 } 5196 5197 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev) 5198 { 5199 struct mlx5_core_dev *mdev = dev->mdev; 5200 5201 if (dev->lag_active) { 5202 dev->lag_active = false; 5203 5204 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); 5205 dev->flow_db->lag_demux_ft = NULL; 5206 5207 mlx5_cmd_destroy_vport_lag(mdev); 5208 } 5209 } 5210 5211 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 5212 { 5213 int err; 5214 5215 dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event; 5216 err = register_netdevice_notifier(&dev->port[port_num].roce.nb); 5217 if (err) { 5218 dev->port[port_num].roce.nb.notifier_call = NULL; 5219 return err; 5220 } 5221 5222 return 0; 5223 } 5224 5225 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) 5226 { 5227 if (dev->port[port_num].roce.nb.notifier_call) { 5228 unregister_netdevice_notifier(&dev->port[port_num].roce.nb); 5229 dev->port[port_num].roce.nb.notifier_call = NULL; 
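/* notifier_call doubles as the "registered" flag: it is set in mlx5_add_netdev_notifier() and checked above before unregistering. */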
5230 } 5231 } 5232 5233 static int mlx5_enable_eth(struct mlx5_ib_dev *dev) 5234 { 5235 int err; 5236 5237 if (MLX5_CAP_GEN(dev->mdev, roce)) { 5238 err = mlx5_nic_vport_enable_roce(dev->mdev); 5239 if (err) 5240 return err; 5241 } 5242 5243 err = mlx5_eth_lag_init(dev); 5244 if (err) 5245 goto err_disable_roce; 5246 5247 return 0; 5248 5249 err_disable_roce: 5250 if (MLX5_CAP_GEN(dev->mdev, roce)) 5251 mlx5_nic_vport_disable_roce(dev->mdev); 5252 5253 return err; 5254 } 5255 5256 static void mlx5_disable_eth(struct mlx5_ib_dev *dev) 5257 { 5258 mlx5_eth_lag_cleanup(dev); 5259 if (MLX5_CAP_GEN(dev->mdev, roce)) 5260 mlx5_nic_vport_disable_roce(dev->mdev); 5261 } 5262 5263 struct mlx5_ib_counter { 5264 const char *name; 5265 size_t offset; 5266 }; 5267 5268 #define INIT_Q_COUNTER(_name) \ 5269 { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)} 5270 5271 static const struct mlx5_ib_counter basic_q_cnts[] = { 5272 INIT_Q_COUNTER(rx_write_requests), 5273 INIT_Q_COUNTER(rx_read_requests), 5274 INIT_Q_COUNTER(rx_atomic_requests), 5275 INIT_Q_COUNTER(out_of_buffer), 5276 }; 5277 5278 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = { 5279 INIT_Q_COUNTER(out_of_sequence), 5280 }; 5281 5282 static const struct mlx5_ib_counter retrans_q_cnts[] = { 5283 INIT_Q_COUNTER(duplicate_request), 5284 INIT_Q_COUNTER(rnr_nak_retry_err), 5285 INIT_Q_COUNTER(packet_seq_err), 5286 INIT_Q_COUNTER(implied_nak_seq_err), 5287 INIT_Q_COUNTER(local_ack_timeout_err), 5288 }; 5289 5290 #define INIT_CONG_COUNTER(_name) \ 5291 { .name = #_name, .offset = \ 5292 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)} 5293 5294 static const struct mlx5_ib_counter cong_cnts[] = { 5295 INIT_CONG_COUNTER(rp_cnp_ignored), 5296 INIT_CONG_COUNTER(rp_cnp_handled), 5297 INIT_CONG_COUNTER(np_ecn_marked_roce_packets), 5298 INIT_CONG_COUNTER(np_cnp_sent), 5299 }; 5300 5301 static const struct mlx5_ib_counter extended_err_cnts[] = { 5302 INIT_Q_COUNTER(resp_local_length_error), 5303 INIT_Q_COUNTER(resp_cqe_error), 5304 INIT_Q_COUNTER(req_cqe_error), 5305 INIT_Q_COUNTER(req_remote_invalid_request), 5306 INIT_Q_COUNTER(req_remote_access_errors), 5307 INIT_Q_COUNTER(resp_remote_access_errors), 5308 INIT_Q_COUNTER(resp_cqe_flush_error), 5309 INIT_Q_COUNTER(req_cqe_flush_error), 5310 }; 5311 5312 #define INIT_EXT_PPCNT_COUNTER(_name) \ 5313 { .name = #_name, .offset = \ 5314 MLX5_BYTE_OFF(ppcnt_reg, \ 5315 counter_set.eth_extended_cntrs_grp_data_layout._name##_high)} 5316 5317 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = { 5318 INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), 5319 }; 5320 5321 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) 5322 { 5323 int i; 5324 5325 for (i = 0; i < dev->num_ports; i++) { 5326 if (dev->port[i].cnts.set_id_valid) 5327 mlx5_core_dealloc_q_counter(dev->mdev, 5328 dev->port[i].cnts.set_id); 5329 kfree(dev->port[i].cnts.names); 5330 kfree(dev->port[i].cnts.offsets); 5331 } 5332 } 5333 5334 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, 5335 struct mlx5_ib_counters *cnts) 5336 { 5337 u32 num_counters; 5338 5339 num_counters = ARRAY_SIZE(basic_q_cnts); 5340 5341 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) 5342 num_counters += ARRAY_SIZE(out_of_seq_q_cnts); 5343 5344 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) 5345 num_counters += ARRAY_SIZE(retrans_q_cnts); 5346 5347 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) 5348 num_counters += ARRAY_SIZE(extended_err_cnts); 5349 5350 cnts->num_q_counters = num_counters; 5351 5352 if 
(MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5353 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts); 5354 num_counters += ARRAY_SIZE(cong_cnts); 5355 } 5356 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5357 cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts); 5358 num_counters += ARRAY_SIZE(ext_ppcnt_cnts); 5359 } 5360 cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL); 5361 if (!cnts->names) 5362 return -ENOMEM; 5363 5364 cnts->offsets = kcalloc(num_counters, 5365 sizeof(cnts->offsets), GFP_KERNEL); 5366 if (!cnts->offsets) 5367 goto err_names; 5368 5369 return 0; 5370 5371 err_names: 5372 kfree(cnts->names); 5373 cnts->names = NULL; 5374 return -ENOMEM; 5375 } 5376 5377 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, 5378 const char **names, 5379 size_t *offsets) 5380 { 5381 int i; 5382 int j = 0; 5383 5384 for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) { 5385 names[j] = basic_q_cnts[i].name; 5386 offsets[j] = basic_q_cnts[i].offset; 5387 } 5388 5389 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { 5390 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) { 5391 names[j] = out_of_seq_q_cnts[i].name; 5392 offsets[j] = out_of_seq_q_cnts[i].offset; 5393 } 5394 } 5395 5396 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { 5397 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) { 5398 names[j] = retrans_q_cnts[i].name; 5399 offsets[j] = retrans_q_cnts[i].offset; 5400 } 5401 } 5402 5403 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { 5404 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) { 5405 names[j] = extended_err_cnts[i].name; 5406 offsets[j] = extended_err_cnts[i].offset; 5407 } 5408 } 5409 5410 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5411 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) { 5412 names[j] = cong_cnts[i].name; 5413 offsets[j] = cong_cnts[i].offset; 5414 } 5415 } 5416 5417 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5418 for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) { 5419 names[j] = ext_ppcnt_cnts[i].name; 5420 offsets[j] = ext_ppcnt_cnts[i].offset; 5421 } 5422 } 5423 } 5424 5425 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) 5426 { 5427 int err = 0; 5428 int i; 5429 bool is_shared; 5430 5431 is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; 5432 5433 for (i = 0; i < dev->num_ports; i++) { 5434 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); 5435 if (err) 5436 goto err_alloc; 5437 5438 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names, 5439 dev->port[i].cnts.offsets); 5440 5441 err = mlx5_cmd_alloc_q_counter(dev->mdev, 5442 &dev->port[i].cnts.set_id, 5443 is_shared ? 
5444 MLX5_SHARED_RESOURCE_UID : 0); 5445 if (err) { 5446 mlx5_ib_warn(dev, 5447 "couldn't allocate queue counter for port %d, err %d\n", 5448 i + 1, err); 5449 goto err_alloc; 5450 } 5451 dev->port[i].cnts.set_id_valid = true; 5452 } 5453 5454 return 0; 5455 5456 err_alloc: 5457 mlx5_ib_dealloc_counters(dev); 5458 return err; 5459 } 5460 5461 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 5462 u8 port_num) 5463 { 5464 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5465 struct mlx5_ib_port *port = &dev->port[port_num - 1]; 5466 5467 /* We support only per port stats */ 5468 if (port_num == 0) 5469 return NULL; 5470 5471 return rdma_alloc_hw_stats_struct(port->cnts.names, 5472 port->cnts.num_q_counters + 5473 port->cnts.num_cong_counters + 5474 port->cnts.num_ext_ppcnt_counters, 5475 RDMA_HW_STATS_DEFAULT_LIFESPAN); 5476 } 5477 5478 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, 5479 struct mlx5_ib_port *port, 5480 struct rdma_hw_stats *stats, 5481 u16 set_id) 5482 { 5483 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); 5484 void *out; 5485 __be32 val; 5486 int ret, i; 5487 5488 out = kvzalloc(outlen, GFP_KERNEL); 5489 if (!out) 5490 return -ENOMEM; 5491 5492 ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen); 5493 if (ret) 5494 goto free; 5495 5496 for (i = 0; i < port->cnts.num_q_counters; i++) { 5497 val = *(__be32 *)(out + port->cnts.offsets[i]); 5498 stats->value[i] = (u64)be32_to_cpu(val); 5499 } 5500 5501 free: 5502 kvfree(out); 5503 return ret; 5504 } 5505 5506 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, 5507 struct mlx5_ib_port *port, 5508 struct rdma_hw_stats *stats) 5509 { 5510 int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters; 5511 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); 5512 int ret, i; 5513 void *out; 5514 5515 out = kvzalloc(sz, GFP_KERNEL); 5516 if (!out) 5517 return -ENOMEM; 5518 5519 ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out); 5520 if (ret) 5521 goto free; 5522 5523 for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) { 5524 stats->value[i + offset] = 5525 be64_to_cpup((__be64 *)(out + 5526 port->cnts.offsets[i + offset])); 5527 } 5528 5529 free: 5530 kvfree(out); 5531 return ret; 5532 } 5533 5534 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 5535 struct rdma_hw_stats *stats, 5536 u8 port_num, int index) 5537 { 5538 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5539 struct mlx5_ib_port *port = &dev->port[port_num - 1]; 5540 struct mlx5_core_dev *mdev; 5541 int ret, num_counters; 5542 u8 mdev_port_num; 5543 5544 if (!stats) 5545 return -EINVAL; 5546 5547 num_counters = port->cnts.num_q_counters + 5548 port->cnts.num_cong_counters + 5549 port->cnts.num_ext_ppcnt_counters; 5550 5551 /* q_counters are per IB device, query the master mdev */ 5552 ret = mlx5_ib_query_q_counters(dev->mdev, port, stats, 5553 port->cnts.set_id); 5554 if (ret) 5555 return ret; 5556 5557 if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { 5558 ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats); 5559 if (ret) 5560 return ret; 5561 } 5562 5563 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { 5564 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, 5565 &mdev_port_num); 5566 if (!mdev) { 5567 /* If port is not affiliated yet, its in down state 5568 * which doesn't have any counters yet, so it would be 5569 * zero. So no need to read from the HCA. 
5570 */ 5571 goto done; 5572 } 5573 ret = mlx5_lag_query_cong_counters(dev->mdev, 5574 stats->value + 5575 port->cnts.num_q_counters, 5576 port->cnts.num_cong_counters, 5577 port->cnts.offsets + 5578 port->cnts.num_q_counters); 5579 5580 mlx5_ib_put_native_port_mdev(dev, port_num); 5581 if (ret) 5582 return ret; 5583 } 5584 5585 done: 5586 return num_counters; 5587 } 5588 5589 static struct rdma_hw_stats * 5590 mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) 5591 { 5592 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5593 struct mlx5_ib_port *port = &dev->port[counter->port - 1]; 5594 5595 /* Q counters are in the beginning of all counters */ 5596 return rdma_alloc_hw_stats_struct(port->cnts.names, 5597 port->cnts.num_q_counters, 5598 RDMA_HW_STATS_DEFAULT_LIFESPAN); 5599 } 5600 5601 static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) 5602 { 5603 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5604 struct mlx5_ib_port *port = &dev->port[counter->port - 1]; 5605 5606 return mlx5_ib_query_q_counters(dev->mdev, port, 5607 counter->stats, counter->id); 5608 } 5609 5610 static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, 5611 struct ib_qp *qp) 5612 { 5613 struct mlx5_ib_dev *dev = to_mdev(qp->device); 5614 u16 cnt_set_id = 0; 5615 int err; 5616 5617 if (!counter->id) { 5618 err = mlx5_cmd_alloc_q_counter(dev->mdev, 5619 &cnt_set_id, 5620 MLX5_SHARED_RESOURCE_UID); 5621 if (err) 5622 return err; 5623 counter->id = cnt_set_id; 5624 } 5625 5626 err = mlx5_ib_qp_set_counter(qp, counter); 5627 if (err) 5628 goto fail_set_counter; 5629 5630 return 0; 5631 5632 fail_set_counter: 5633 mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id); 5634 counter->id = 0; 5635 5636 return err; 5637 } 5638 5639 static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp) 5640 { 5641 return mlx5_ib_qp_set_counter(qp, NULL); 5642 } 5643 5644 static int mlx5_ib_counter_dealloc(struct rdma_counter *counter) 5645 { 5646 struct mlx5_ib_dev *dev = to_mdev(counter->device); 5647 5648 return mlx5_core_dealloc_q_counter(dev->mdev, counter->id); 5649 } 5650 5651 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num, 5652 enum rdma_netdev_t type, 5653 struct rdma_netdev_alloc_params *params) 5654 { 5655 if (type != RDMA_NETDEV_IPOIB) 5656 return -EOPNOTSUPP; 5657 5658 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params); 5659 } 5660 5661 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev) 5662 { 5663 if (!dev->delay_drop.dbg) 5664 return; 5665 debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs); 5666 kfree(dev->delay_drop.dbg); 5667 dev->delay_drop.dbg = NULL; 5668 } 5669 5670 static void cancel_delay_drop(struct mlx5_ib_dev *dev) 5671 { 5672 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) 5673 return; 5674 5675 cancel_work_sync(&dev->delay_drop.delay_drop_work); 5676 delay_drop_debugfs_cleanup(dev); 5677 } 5678 5679 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf, 5680 size_t count, loff_t *pos) 5681 { 5682 struct mlx5_ib_delay_drop *delay_drop = filp->private_data; 5683 char lbuf[20]; 5684 int len; 5685 5686 len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout); 5687 return simple_read_from_buffer(buf, count, pos, lbuf, len); 5688 } 5689 5690 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf, 5691 size_t count, loff_t *pos) 5692 { 5693 struct mlx5_ib_delay_drop *delay_drop = filp->private_data; 5694 u32 timeout; 5695 u32 var; 5696 5697 if 
(kstrtouint_from_user(buf, count, 0, &var)) 5698 return -EFAULT; 5699 5700 timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 5701 1000); 5702 if (timeout != var) 5703 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n", 5704 timeout); 5705 5706 delay_drop->timeout = timeout; 5707 5708 return count; 5709 } 5710 5711 static const struct file_operations fops_delay_drop_timeout = { 5712 .owner = THIS_MODULE, 5713 .open = simple_open, 5714 .write = delay_drop_timeout_write, 5715 .read = delay_drop_timeout_read, 5716 }; 5717 5718 static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) 5719 { 5720 struct mlx5_ib_dbg_delay_drop *dbg; 5721 5722 if (!mlx5_debugfs_root) 5723 return 0; 5724 5725 dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); 5726 if (!dbg) 5727 return -ENOMEM; 5728 5729 dev->delay_drop.dbg = dbg; 5730 5731 dbg->dir_debugfs = 5732 debugfs_create_dir("delay_drop", 5733 dev->mdev->priv.dbg_root); 5734 if (!dbg->dir_debugfs) 5735 goto out_debugfs; 5736 5737 dbg->events_cnt_debugfs = 5738 debugfs_create_atomic_t("num_timeout_events", 0400, 5739 dbg->dir_debugfs, 5740 &dev->delay_drop.events_cnt); 5741 if (!dbg->events_cnt_debugfs) 5742 goto out_debugfs; 5743 5744 dbg->rqs_cnt_debugfs = 5745 debugfs_create_atomic_t("num_rqs", 0400, 5746 dbg->dir_debugfs, 5747 &dev->delay_drop.rqs_cnt); 5748 if (!dbg->rqs_cnt_debugfs) 5749 goto out_debugfs; 5750 5751 dbg->timeout_debugfs = 5752 debugfs_create_file("timeout", 0600, 5753 dbg->dir_debugfs, 5754 &dev->delay_drop, 5755 &fops_delay_drop_timeout); 5756 if (!dbg->timeout_debugfs) 5757 goto out_debugfs; 5758 5759 return 0; 5760 5761 out_debugfs: 5762 delay_drop_debugfs_cleanup(dev); 5763 return -ENOMEM; 5764 } 5765 5766 static void init_delay_drop(struct mlx5_ib_dev *dev) 5767 { 5768 if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) 5769 return; 5770 5771 mutex_init(&dev->delay_drop.lock); 5772 dev->delay_drop.dev = dev; 5773 dev->delay_drop.activate = false; 5774 dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000; 5775 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler); 5776 atomic_set(&dev->delay_drop.rqs_cnt, 0); 5777 atomic_set(&dev->delay_drop.events_cnt, 0); 5778 5779 if (delay_drop_debugfs_init(dev)) 5780 mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); 5781 } 5782 5783 /* The mlx5_ib_multiport_mutex should be held when calling this function */ 5784 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, 5785 struct mlx5_ib_multiport_info *mpi) 5786 { 5787 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 5788 struct mlx5_ib_port *port = &ibdev->port[port_num]; 5789 int comps; 5790 int err; 5791 int i; 5792 5793 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); 5794 5795 spin_lock(&port->mp.mpi_lock); 5796 if (!mpi->ibdev) { 5797 spin_unlock(&port->mp.mpi_lock); 5798 return; 5799 } 5800 5801 if (mpi->mdev_events.notifier_call) 5802 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); 5803 mpi->mdev_events.notifier_call = NULL; 5804 5805 mpi->ibdev = NULL; 5806 5807 spin_unlock(&port->mp.mpi_lock); 5808 mlx5_remove_netdev_notifier(ibdev, port_num); 5809 spin_lock(&port->mp.mpi_lock); 5810 5811 comps = mpi->mdev_refcnt; 5812 if (comps) { 5813 mpi->unaffiliate = true; 5814 init_completion(&mpi->unref_comp); 5815 spin_unlock(&port->mp.mpi_lock); 5816 5817 for (i = 0; i < comps; i++) 5818 wait_for_completion(&mpi->unref_comp); 5819 5820 spin_lock(&port->mp.mpi_lock); 5821 mpi->unaffiliate = false; 5822 } 5823 5824 port->mp.mpi = NULL; 5825 
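/* Put the mpi back on the unaffiliated list so a later bind can claim it again; the caller holds mlx5_ib_multiport_mutex. */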

	list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);

	spin_unlock(&port->mp.mpi_lock);

	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);

	mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
	/* Only log an error here; the pointers were already cleaned up and
	 * the port added back to the unaffiliated list above.
	 */
	if (err)
		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
			    port_num + 1);

	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}

/* The mlx5_ib_multiport_mutex should be held when calling this function */
static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
				    struct mlx5_ib_multiport_info *mpi)
{
	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	int err;

	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
	if (ibdev->port[port_num].mp.mpi) {
		mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
			    port_num + 1);
		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
		return false;
	}

	ibdev->port[port_num].mp.mpi = mpi;
	mpi->ibdev = ibdev;
	mpi->mdev_events.notifier_call = NULL;
	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);

	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
	if (err)
		goto unbind;

	err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
	if (err)
		goto unbind;

	err = mlx5_add_netdev_notifier(ibdev, port_num);
	if (err) {
		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
			    port_num + 1);
		goto unbind;
	}

	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);

	mlx5_ib_init_cong_debugfs(ibdev, port_num);

	return true;

unbind:
	mlx5_ib_unbind_slave_port(ibdev, mpi);
	return false;
}

static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	struct mlx5_ib_multiport_info *mpi;
	int err;
	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return 0;

	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
						     &dev->sys_image_guid);
	if (err)
		return err;

	err = mlx5_nic_vport_enable_roce(dev->mdev);
	if (err)
		return err;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		bool bound = false;

		/* build a stub multiport info struct for the native port. */
		if (i == port_num) {
			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
			if (!mpi) {
				mutex_unlock(&mlx5_ib_multiport_mutex);
				mlx5_nic_vport_disable_roce(dev->mdev);
				return -ENOMEM;
			}

			mpi->is_master = true;
			mpi->mdev = dev->mdev;
			mpi->sys_image_guid = dev->sys_image_guid;
			dev->port[i].mp.mpi = mpi;
			mpi->ibdev = dev;
			mpi = NULL;
			continue;
		}

		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
				    list) {
			if (dev->sys_image_guid == mpi->sys_image_guid &&
			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
				bound = mlx5_ib_bind_slave_port(dev, mpi);
			}

			if (bound) {
				dev_dbg(mpi->mdev->device,
					"removing port from unaffiliated list.\n");
				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
				list_del(&mpi->list);
				break;
			}
		}
		if (!bound) {
			get_port_caps(dev, i + 1);
			mlx5_ib_dbg(dev, "no free port found for port %d\n",
				    i + 1);
		}
	}

	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return err;
}

static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		if (dev->port[i].mp.mpi) {
			/* Destroy the native port stub */
			if (i == port_num) {
				kfree(dev->port[i].mp.mpi);
				dev->port[i].mp.mpi = NULL;
			} else {
				mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
				mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
			}
		}
	}

	mlx5_ib_dbg(dev, "removing from devlist\n");
	list_del(&dev->ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);

	mlx5_nic_vport_disable_roce(dev->mdev);
}

ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_dm,
	UVERBS_OBJECT_DM,
	UVERBS_METHOD_DM_ALLOC,
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16),
			    UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
			     enum mlx5_ib_uapi_dm_type,
			     UA_OPTIONAL));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_flow_action,
	UVERBS_OBJECT_FLOW_ACTION,
	UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
			     enum mlx5_ib_uapi_flow_action_flags));

static const struct uapi_definition mlx5_ib_defs[] = {
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
#endif

	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
				&mlx5_ib_flow_action),
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
	{}
};

static int mlx5_ib_read_counters(struct ib_counters *counters,
				 struct ib_counters_read_attr *read_attr,
				 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	struct mlx5_read_counters_attr mread_attr = {};
	struct mlx5_ib_flow_counters_desc *desc;
	int ret, i;

	mutex_lock(&mcounters->mcntrs_mutex);
	if (mcounters->cntrs_max_index > read_attr->ncounters) {
		ret = -EINVAL;
		goto err_bound;
	}

	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
				 GFP_KERNEL);
	if (!mread_attr.out) {
		ret = -ENOMEM;
		goto err_bound;
	}

	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
	mread_attr.flags = read_attr->flags;
	ret = mcounters->read_counters(counters->device, &mread_attr);
	if (ret)
		goto err_read;

	/* do the pass over the counters data array to assign according to the
	 * descriptions and indexing pairs
	 */
	desc = mcounters->counters_data;
	for (i = 0; i < mcounters->ncounters; i++)
		read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];

err_read:
	kfree(mread_attr.out);
err_bound:
	mutex_unlock(&mcounters->mcntrs_mutex);
	return ret;
}

static int mlx5_ib_destroy_counters(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	counters_clear_description(counters);
	if (mcounters->hw_cntrs_hndl)
		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
				mcounters->hw_cntrs_hndl);

	kfree(mcounters);

	return 0;
}

static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
						   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters;

	mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
	if (!mcounters)
		return ERR_PTR(-ENOMEM);

	mutex_init(&mcounters->mcntrs_mutex);

	return &mcounters->ibcntrs;
}

static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	mlx5_ib_cleanup_multiport_master(dev);
	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		srcu_barrier(&dev->mr_srcu);
		cleanup_srcu_struct(&dev->mr_srcu);
	}

	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));

	WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
		!bitmap_empty(
			dev->dm.steering_sw_icm_alloc_blocks,
			BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
			    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));

	kfree(dev->dm.steering_sw_icm_alloc_blocks);

	WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
		!bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
			      BIT(MLX5_CAP_DEV_MEM(
				      mdev, log_header_modify_sw_icm_size) -
				  MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));

	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
}

static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 header_modify_icm_blocks = 0;
	u64 steering_icm_blocks = 0;
	int err;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		spin_lock_init(&dev->port[i].mp.mpi_lock);
		rwlock_init(&dev->port[i].roce.netdev_lock);
		dev->port[i].roce.dev = dev;
		dev->port[i].roce.native_port_num = i + 1;
		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
	}

	err = mlx5_ib_init_multiport_master(dev);
	if (err)
		return err;

	err = set_has_smi_cap(dev);
	if (err)
		return err;

	if (!mlx5_core_mp_enabled(mdev)) {
		for (i = 1; i <= dev->num_ports; i++) {
			err = get_port_caps(dev, i);
			if (err)
				break;
		}
	} else {
		err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
	}
	if (err)
		goto err_mp;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
	dev->ib_dev.dev.parent = mdev->device;

	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
		if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
			steering_icm_blocks =
				BIT(MLX5_CAP_DEV_MEM(mdev,
						     log_steering_sw_icm_size) -
				    MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));

			dev->dm.steering_sw_icm_alloc_blocks =
				kcalloc(BITS_TO_LONGS(steering_icm_blocks),
					sizeof(unsigned long), GFP_KERNEL);
			if (!dev->dm.steering_sw_icm_alloc_blocks)
				goto err_mp;
		}

		if (MLX5_CAP64_DEV_MEM(mdev,
				       header_modify_sw_icm_start_address)) {
			header_modify_icm_blocks = BIT(
				MLX5_CAP_DEV_MEM(
					mdev, log_header_modify_sw_icm_size) -
				MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));

			dev->dm.header_modify_sw_icm_alloc_blocks =
				kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
					sizeof(unsigned long), GFP_KERNEL);
			if (!dev->dm.header_modify_sw_icm_alloc_blocks)
				goto err_dm;
		}
	}

	spin_lock_init(&dev->dm.lock);
	dev->dm.dev = mdev;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = init_srcu_struct(&dev->mr_srcu);
		if (err)
			goto err_dm;
	}

	return 0;

err_dm:
	kfree(dev->dm.steering_sw_icm_alloc_blocks);
	kfree(dev->dm.header_modify_sw_icm_alloc_blocks);

err_mp:
	mlx5_ib_cleanup_multiport_master(dev);

	return -ENOMEM;
}

static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
{
	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);

	if (!dev->flow_db)
		return -ENOMEM;

	mutex_init(&dev->flow_db->lock);

	return 0;
}

static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
{
	kfree(dev->flow_db);
}
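
/*
 * Base verbs ops installed on every mlx5_ib device.  The op tables that
 * follow (IPsec flow actions, enhanced IPoIB, SR-IOV, memory windows, XRC,
 * device memory) are only added in mlx5_ib_stage_caps_init() when the
 * corresponding capability or configuration is present.
 */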
static const struct ib_device_ops mlx5_ib_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MLX5,
	.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,

	.add_gid = mlx5_ib_add_gid,
	.alloc_mr = mlx5_ib_alloc_mr,
	.alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
	.alloc_pd = mlx5_ib_alloc_pd,
	.alloc_ucontext = mlx5_ib_alloc_ucontext,
	.attach_mcast = mlx5_ib_mcg_attach,
	.check_mr_status = mlx5_ib_check_mr_status,
	.create_ah = mlx5_ib_create_ah,
	.create_counters = mlx5_ib_create_counters,
	.create_cq = mlx5_ib_create_cq,
	.create_flow = mlx5_ib_create_flow,
	.create_qp = mlx5_ib_create_qp,
	.create_srq = mlx5_ib_create_srq,
	.dealloc_pd = mlx5_ib_dealloc_pd,
	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
	.del_gid = mlx5_ib_del_gid,
	.dereg_mr = mlx5_ib_dereg_mr,
	.destroy_ah = mlx5_ib_destroy_ah,
	.destroy_counters = mlx5_ib_destroy_counters,
	.destroy_cq = mlx5_ib_destroy_cq,
	.destroy_flow = mlx5_ib_destroy_flow,
	.destroy_flow_action = mlx5_ib_destroy_flow_action,
	.destroy_qp = mlx5_ib_destroy_qp,
	.destroy_srq = mlx5_ib_destroy_srq,
	.detach_mcast = mlx5_ib_mcg_detach,
	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
	.drain_rq = mlx5_ib_drain_rq,
	.drain_sq = mlx5_ib_drain_sq,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = mlx5_ib_get_dma_mr,
	.get_link_layer = mlx5_ib_port_link_layer,
	.map_mr_sg = mlx5_ib_map_mr_sg,
	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
	.mmap = mlx5_ib_mmap,
	.modify_cq = mlx5_ib_modify_cq,
	.modify_device = mlx5_ib_modify_device,
	.modify_port = mlx5_ib_modify_port,
	.modify_qp = mlx5_ib_modify_qp,
	.modify_srq = mlx5_ib_modify_srq,
	.poll_cq = mlx5_ib_poll_cq,
	.post_recv = mlx5_ib_post_recv,
	.post_send = mlx5_ib_post_send,
	.post_srq_recv = mlx5_ib_post_srq_recv,
	.process_mad = mlx5_ib_process_mad,
	.query_ah = mlx5_ib_query_ah,
	.query_device = mlx5_ib_query_device,
	.query_gid = mlx5_ib_query_gid,
	.query_pkey = mlx5_ib_query_pkey,
	.query_qp = mlx5_ib_query_qp,
	.query_srq = mlx5_ib_query_srq,
	.read_counters = mlx5_ib_read_counters,
	.reg_user_mr = mlx5_ib_reg_user_mr,
	.req_notify_cq = mlx5_ib_arm_cq,
	.rereg_user_mr = mlx5_ib_rereg_user_mr,
	.resize_cq = mlx5_ib_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
};

static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
	.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
	.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
};

static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
};

static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
	.get_vf_config = mlx5_ib_get_vf_config,
	.get_vf_stats = mlx5_ib_get_vf_stats,
	.set_vf_guid = mlx5_ib_set_vf_guid,
	.set_vf_link_state = mlx5_ib_set_vf_link_state,
};

static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
	.alloc_mw = mlx5_ib_alloc_mw,
	.dealloc_mw = mlx5_ib_dealloc_mw,
};

static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
	.alloc_xrcd = mlx5_ib_alloc_xrcd,
	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,
};

static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
	.alloc_dm = mlx5_ib_alloc_dm,
	.dealloc_dm = mlx5_ib_dealloc_dm,
	.reg_dm_mr = mlx5_ib_reg_dm_mr,
};

static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
		ib_set_device_ops(&dev->ib_dev,
				  &mlx5_ib_dev_ipoib_enhanced_ops);

	if (mlx5_core_is_pf(mdev))
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);

	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);

	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
	    MLX5_ACCEL_IPSEC_CAP_DEVICE)
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);

	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
		dev->ib_dev.driver_def = mlx5_ib_defs;

	err = init_node_data(dev);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		mutex_init(&dev->lb.mutex);

	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_port_ops = {
	.get_port_immutable = mlx5_port_immutable,
	.query_port = mlx5_ib_query_port,
};

static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
	.get_port_immutable = mlx5_port_rep_immutable,
	.query_port = mlx5_ib_rep_query_port,
};

static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
	.create_wq = mlx5_ib_create_wq,
	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
	.destroy_wq = mlx5_ib_destroy_wq,
	.get_netdev = mlx5_ib_get_netdev,
	.modify_wq = mlx5_ib_modify_wq,
};
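
/*
 * RoCE setup shared by the PF and representor profiles: expose the WQ and
 * RWQ-indirection-table verbs and register the netdev notifier for the
 * native port.
 */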
static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
{
	u8 port_num;

	dev->ib_dev.uverbs_ex_cmd_mask |=
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
		(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);

	port_num = mlx5_core_native_port_num(dev->mdev) - 1;

	/* Register only for native ports */
	return mlx5_add_netdev_notifier(dev, port_num);
}

static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
{
	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;

	mlx5_remove_netdev_notifier(dev, port_num);
}

static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	int err = 0;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET)
		err = mlx5_ib_stage_common_roce_init(dev);

	return err;
}

static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_stage_common_roce_cleanup(dev);
}

static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	int err;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_ib_stage_common_roce_init(dev);
		if (err)
			return err;

		err = mlx5_enable_eth(dev);
		if (err)
			goto cleanup;
	}

	return 0;
cleanup:
	mlx5_ib_stage_common_roce_cleanup(dev);

	return err;
}

static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_eth(dev);
		mlx5_ib_stage_common_roce_cleanup(dev);
	}
}

static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
{
	return create_dev_resources(&dev->devr);
}

static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
	destroy_dev_resources(&dev->devr);
}

static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_internal_fill_odp_caps(dev);

	return mlx5_ib_odp_init_one(dev);
}

static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_odp_cleanup_one(dev);
}

static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
	.alloc_hw_stats = mlx5_ib_alloc_hw_stats,
	.get_hw_stats = mlx5_ib_get_hw_stats,
	.counter_bind_qp = mlx5_ib_counter_bind_qp,
	.counter_unbind_qp = mlx5_ib_counter_unbind_qp,
	.counter_dealloc = mlx5_ib_counter_dealloc,
	.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
	.counter_update_stats = mlx5_ib_counter_update_stats,
};

static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		ib_set_device_ops(&dev->ib_dev,
				  &mlx5_ib_dev_hw_stats_ops);

		return mlx5_ib_alloc_counters(dev);
	}

	return 0;
}

static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_counters(dev);
}

static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_init_cong_debugfs(dev,
				  mlx5_core_native_port_num(dev->mdev) - 1);
	return 0;
}

static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_cleanup_cong_debugfs(dev,
				     mlx5_core_native_port_num(dev->mdev) - 1);
}

static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
{
	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
}

static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
}

static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		return err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		/* Release the regular bfreg; fp_bfreg was never allocated */
		mlx5_free_bfreg(dev->mdev, &dev->bfreg);

	return err;
}

static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}

static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
	const char *name;

	rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
	if (!mlx5_lag_is_roce(dev->mdev))
		name = "mlx5_%d";
	else
		name = "mlx5_bond_%d";
	return ib_register_device(&dev->ib_dev, name);
}

static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
	destroy_umrc_res(dev);
}

static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
}

static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
{
	return create_umr_res(dev);
}

static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
{
	init_delay_drop(dev);

	return 0;
}

static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
{
	cancel_delay_drop(dev);
}

static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
{
	dev->mdev_events.notifier_call = mlx5_ib_event;
	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
	return 0;
}

static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
}

static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
{
	int uid;

	uid = mlx5_ib_devx_create(dev, false);
	if (uid > 0) {
		dev->devx_whitelist_uid = uid;
		mlx5_ib_devx_init_event_table(dev);
	}

	return 0;
}

static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
{
	if (dev->devx_whitelist_uid) {
		mlx5_ib_devx_cleanup_event_table(dev);
		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
	}
}
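
/*
 * Tear down the first @stage stages of @profile in reverse order, then
 * release the port array and the ib_device.  Used both for full device
 * removal and to unwind a partially completed __mlx5_ib_add().
 */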
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage)
{
	/* Number of stages to cleanup */
	while (stage) {
		stage--;
		if (profile->stage[stage].cleanup)
			profile->stage[stage].cleanup(dev);
	}

	kfree(dev->port);
	ib_dealloc_device(&dev->ib_dev);
}

void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile)
{
	int err;
	int i;

	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err)
				goto err_out;
		}
	}

	dev->profile = profile;
	dev->ib_active = true;

	return dev;

err_out:
	__mlx5_ib_remove(dev, profile, i);

	return NULL;
}

static const struct mlx5_ib_profile pf_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_flow_db_init,
		     mlx5_ib_stage_flow_db_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_roce_init,
		     mlx5_ib_stage_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_ODP,
		     mlx5_ib_stage_odp_init,
		     mlx5_ib_stage_odp_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
		     mlx5_ib_stage_cong_debugfs_init,
		     mlx5_ib_stage_cong_debugfs_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
		     mlx5_ib_stage_devx_init,
		     mlx5_ib_stage_devx_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
		     mlx5_ib_stage_delay_drop_init,
		     mlx5_ib_stage_delay_drop_cleanup),
};
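
/*
 * Profile used for the uplink representor ib device.  Compared with
 * pf_profile it drops the ODP, congestion debugfs and delay-drop stages and
 * uses the representor-specific port and RoCE callbacks.
 */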
const struct mlx5_ib_profile uplink_rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_flow_db_init,
		     mlx5_ib_stage_flow_db_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
		     mlx5_ib_stage_devx_init,
		     mlx5_ib_stage_devx_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
};

static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;
	bool bound = false;
	int err;

	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
	if (!mpi)
		return NULL;

	mpi->mdev = mdev;

	err = mlx5_query_nic_vport_system_image_guid(mdev,
						     &mpi->sys_image_guid);
	if (err) {
		kfree(mpi);
		return NULL;
	}

	mutex_lock(&mlx5_ib_multiport_mutex);
	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
		if (dev->sys_image_guid == mpi->sys_image_guid)
			bound = mlx5_ib_bind_slave_port(dev, mpi);

		if (bound) {
			rdma_roce_rescan_device(&dev->ib_dev);
			break;
		}
	}

	if (!bound) {
		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
		dev_dbg(mdev->device,
			"no suitable IB device found to bind to, added to unaffiliated list.\n");
	}
	mutex_unlock(&mlx5_ib_multiport_mutex);

	return mpi;
}

static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	enum rdma_link_layer ll;
	struct mlx5_ib_dev *dev;
	int port_type_cap;
	int num_ports;

	printk_once(KERN_INFO "%s", mlx5_version);

	if (MLX5_ESWITCH_MANAGER(mdev) &&
	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
		if (!mlx5_core_mp_enabled(mdev))
			mlx5_ib_register_vport_reps(mdev);
		return mdev;
	}

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
		return mlx5_ib_add_slave_port(mdev);

	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
			MLX5_CAP_GEN(mdev, num_vhca_ports));
	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!dev)
		return NULL;
	dev->port = kcalloc(num_ports, sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port) {
		ib_dealloc_device((struct ib_device *)dev);
		return NULL;
	}

	dev->mdev = mdev;
	dev->num_ports = num_ports;

	return __mlx5_ib_add(dev, &pf_profile);
}

static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;

	if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
		mlx5_ib_unregister_vport_reps(mdev);
		return;
	}

	if (mlx5_core_is_mp_slave(mdev)) {
		mpi = context;
		mutex_lock(&mlx5_ib_multiport_mutex);
		if (mpi->ibdev)
			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
		list_del(&mpi->list);
		mutex_unlock(&mlx5_ib_multiport_mutex);
		return;
	}

	dev = context;
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}

static struct mlx5_interface mlx5_ib_interface = {
	.add = mlx5_ib_add,
	.remove = mlx5_ib_remove,
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};

unsigned long mlx5_ib_get_xlt_emergency_page(void)
{
	mutex_lock(&xlt_emergency_page_mutex);
	return xlt_emergency_page;
}

void mlx5_ib_put_xlt_emergency_page(void)
{
	mutex_unlock(&xlt_emergency_page_mutex);
}

static int __init mlx5_ib_init(void)
{
	int err;

	xlt_emergency_page = __get_free_page(GFP_KERNEL);
	if (!xlt_emergency_page)
		return -ENOMEM;

	mutex_init(&xlt_emergency_page_mutex);

	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
	if (!mlx5_ib_event_wq) {
		free_page(xlt_emergency_page);
		return -ENOMEM;
	}

	mlx5_ib_odp_init();

	err = mlx5_register_interface(&mlx5_ib_interface);

	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	destroy_workqueue(mlx5_ib_event_wq);
	mutex_destroy(&xlt_emergency_page_mutex);
	free_page(xlt_emergency_page);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);